Diffstat (limited to 'drivers/dma')
37 files changed, 2454 insertions, 622 deletions
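A pattern repeated throughout the hunks below is the conversion of dma_addr_t debug prints from "0x%08x" to the "%pad" printk specifier (at_hdmac, at_xdmac, and others). As a minimal sketch of that pattern, assuming a placeholder driver function that is not part of these patches:

/*
 * Sketch of the %pad conversion used throughout these patches.
 * dma_addr_t can be wider than unsigned int (e.g. on 32-bit ARM with
 * LPAE), so printing it with "0x%08x" truncates the value; "%pad"
 * takes the address of the dma_addr_t and prints it at its native
 * width. example_debug_print() is a hypothetical helper for
 * illustration only.
 */
#include <linux/device.h>
#include <linux/types.h>

static void example_debug_print(struct device *dev, dma_addr_t dest, size_t len)
{
	/* before: dev_dbg(dev, "dest=0x%08x len=%zu\n", dest, len); */
	dev_dbg(dev, "dest=%pad len=%zu\n", &dest, len);
}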
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig index b4584757d..e6cd1a320 100644 --- a/drivers/dma/Kconfig +++ b/drivers/dma/Kconfig @@ -229,7 +229,7 @@ config IMX_SDMA Support the i.MX SDMA engine. This engine is integrated into Freescale i.MX25/31/35/51/53/6 chips. -config IDMA64 +config INTEL_IDMA64 tristate "Intel integrated DMA 64-bit support" select DMA_ENGINE select DMA_VIRTUAL_CHANNELS @@ -486,7 +486,7 @@ config TI_EDMA depends on ARCH_DAVINCI || ARCH_OMAP || ARCH_KEYSTONE select DMA_ENGINE select DMA_VIRTUAL_CHANNELS - select TI_PRIV_EDMA + select TI_DMA_CROSSBAR if ARCH_OMAP default n help Enable support for the TI EDMA controller. This DMA diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile index 7711a7180..ef9c099bd 100644 --- a/drivers/dma/Makefile +++ b/drivers/dma/Makefile @@ -34,7 +34,7 @@ obj-$(CONFIG_HSU_DMA) += hsu/ obj-$(CONFIG_IMG_MDC_DMA) += img-mdc-dma.o obj-$(CONFIG_IMX_DMA) += imx-dma.o obj-$(CONFIG_IMX_SDMA) += imx-sdma.o -obj-$(CONFIG_IDMA64) += idma64.o +obj-$(CONFIG_INTEL_IDMA64) += idma64.o obj-$(CONFIG_INTEL_IOATDMA) += ioat/ obj-$(CONFIG_INTEL_IOP_ADMA) += iop-adma.o obj-$(CONFIG_INTEL_MIC_X100_DMA) += mic_x100_dma.o diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c index 5a635646e..16d0daa05 100644 --- a/drivers/dma/acpi-dma.c +++ b/drivers/dma/acpi-dma.c @@ -21,6 +21,7 @@ #include <linux/ioport.h> #include <linux/acpi.h> #include <linux/acpi_dma.h> +#include <linux/property.h> static LIST_HEAD(acpi_dma_list); static DEFINE_MUTEX(acpi_dma_lock); @@ -160,10 +161,8 @@ int acpi_dma_controller_register(struct device *dev, return -EINVAL; /* Check if the device was enumerated by ACPI */ - if (!ACPI_HANDLE(dev)) - return -EINVAL; - - if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) + adev = ACPI_COMPANION(dev); + if (!adev) return -EINVAL; adma = kzalloc(sizeof(*adma), GFP_KERNEL); @@ -358,10 +357,11 @@ struct dma_chan *acpi_dma_request_slave_chan_by_index(struct device *dev, int found; /* Check if the device was enumerated by ACPI */ - if (!dev || !ACPI_HANDLE(dev)) + if (!dev) return ERR_PTR(-ENODEV); - if (acpi_bus_get_device(ACPI_HANDLE(dev), &adev)) + adev = ACPI_COMPANION(dev); + if (!adev) return ERR_PTR(-ENODEV); memset(&pdata, 0, sizeof(pdata)); @@ -413,21 +413,29 @@ EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_index); * translate the names "tx" and "rx" here based on the most common case where * the first FixedDMA descriptor is TX and second is RX. * + * If the device has "dma-names" property the FixedDMA descriptor indices + * are retrieved based on those. Otherwise the function falls back using + * hardcoded indices. + * * Return: * Pointer to appropriate dma channel on success or an error pointer. 
*/ struct dma_chan *acpi_dma_request_slave_chan_by_name(struct device *dev, const char *name) { - size_t index; - - if (!strcmp(name, "tx")) - index = 0; - else if (!strcmp(name, "rx")) - index = 1; - else - return ERR_PTR(-ENODEV); + int index; + + index = device_property_match_string(dev, "dma-names", name); + if (index < 0) { + if (!strcmp(name, "tx")) + index = 0; + else if (!strcmp(name, "rx")) + index = 1; + else + return ERR_PTR(-ENODEV); + } + dev_dbg(dev, "found DMA channel \"%s\" at index %d\n", name, index); return acpi_dma_request_slave_chan_by_index(dev, index); } EXPORT_SYMBOL_GPL(acpi_dma_request_slave_chan_by_name); diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c index 58d406230..53d22eb73 100644 --- a/drivers/dma/at_hdmac.c +++ b/drivers/dma/at_hdmac.c @@ -458,10 +458,10 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc) dma_cookie_complete(txd); /* If the transfer was a memset, free our temporary buffer */ - if (desc->memset) { + if (desc->memset_buffer) { dma_pool_free(atdma->memset_pool, desc->memset_vaddr, desc->memset_paddr); - desc->memset = false; + desc->memset_buffer = false; } /* move children to free_list */ @@ -729,8 +729,8 @@ atc_prep_dma_interleaved(struct dma_chan *chan, return NULL; dev_info(chan2dev(chan), - "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", - __func__, xt->src_start, xt->dst_start, xt->numf, + "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", + __func__, &xt->src_start, &xt->dst_start, xt->numf, xt->frame_size, flags); /* @@ -824,8 +824,8 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, u32 ctrla; u32 ctrlb; - dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n", - dest, src, len, flags); + dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n", + &dest, &src, len, flags); if (unlikely(!len)) { dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n"); @@ -881,6 +881,46 @@ err_desc_get: return NULL; } +static struct at_desc *atc_create_memset_desc(struct dma_chan *chan, + dma_addr_t psrc, + dma_addr_t pdst, + size_t len) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_desc *desc; + size_t xfer_count; + + u32 ctrla = ATC_SRC_WIDTH(2) | ATC_DST_WIDTH(2); + u32 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN | + ATC_SRC_ADDR_MODE_FIXED | + ATC_DST_ADDR_MODE_INCR | + ATC_FC_MEM2MEM; + + xfer_count = len >> 2; + if (xfer_count > ATC_BTSIZE_MAX) { + dev_err(chan2dev(chan), "%s: buffer is too big\n", + __func__); + return NULL; + } + + desc = atc_desc_get(atchan); + if (!desc) { + dev_err(chan2dev(chan), "%s: can't get a descriptor\n", + __func__); + return NULL; + } + + desc->lli.saddr = psrc; + desc->lli.daddr = pdst; + desc->lli.ctrla = ctrla | xfer_count; + desc->lli.ctrlb = ctrlb; + + desc->txd.cookie = 0; + desc->len = len; + + return desc; +} + /** * atc_prep_dma_memset - prepare a memcpy operation * @chan: the channel to prepare operation on @@ -893,15 +933,13 @@ static struct dma_async_tx_descriptor * atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, size_t len, unsigned long flags) { - struct at_dma_chan *atchan = to_at_dma_chan(chan); struct at_dma *atdma = to_at_dma(chan->device); - struct at_desc *desc = NULL; - size_t xfer_count; - u32 ctrla; - u32 ctrlb; + struct at_desc *desc; + void __iomem *vaddr; + dma_addr_t paddr; - dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__, - dest, value, len, flags); + dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x 
l0x%zx f0x%lx\n", __func__, + &dest, value, len, flags); if (unlikely(!len)) { dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__); @@ -914,61 +952,117 @@ atc_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, return NULL; } - xfer_count = len >> 2; - if (xfer_count > ATC_BTSIZE_MAX) { - dev_err(chan2dev(chan), "%s: buffer is too big\n", + vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr); + if (!vaddr) { + dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n", __func__); return NULL; } + *(u32*)vaddr = value; - ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN - | ATC_SRC_ADDR_MODE_FIXED - | ATC_DST_ADDR_MODE_INCR - | ATC_FC_MEM2MEM; + desc = atc_create_memset_desc(chan, paddr, dest, len); + if (!desc) { + dev_err(chan2dev(chan), "%s: couldn't get a descriptor\n", + __func__); + goto err_free_buffer; + } - ctrla = ATC_SRC_WIDTH(2) | - ATC_DST_WIDTH(2); + desc->memset_paddr = paddr; + desc->memset_vaddr = vaddr; + desc->memset_buffer = true; - desc = atc_desc_get(atchan); - if (!desc) { - dev_err(chan2dev(chan), "%s: can't get a descriptor\n", + desc->txd.cookie = -EBUSY; + desc->total_len = len; + + /* set end-of-link on the descriptor */ + set_desc_eol(desc); + + desc->txd.flags = flags; + + return &desc->txd; + +err_free_buffer: + dma_pool_free(atdma->memset_pool, vaddr, paddr); + return NULL; +} + +static struct dma_async_tx_descriptor * +atc_prep_dma_memset_sg(struct dma_chan *chan, + struct scatterlist *sgl, + unsigned int sg_len, int value, + unsigned long flags) +{ + struct at_dma_chan *atchan = to_at_dma_chan(chan); + struct at_dma *atdma = to_at_dma(chan->device); + struct at_desc *desc = NULL, *first = NULL, *prev = NULL; + struct scatterlist *sg; + void __iomem *vaddr; + dma_addr_t paddr; + size_t total_len = 0; + int i; + + dev_vdbg(chan2dev(chan), "%s: v0x%x l0x%zx f0x%lx\n", __func__, + value, sg_len, flags); + + if (unlikely(!sgl || !sg_len)) { + dev_dbg(chan2dev(chan), "%s: scatterlist is empty!\n", __func__); return NULL; } - desc->memset_vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, - &desc->memset_paddr); - if (!desc->memset_vaddr) { + vaddr = dma_pool_alloc(atdma->memset_pool, GFP_ATOMIC, &paddr); + if (!vaddr) { dev_err(chan2dev(chan), "%s: couldn't allocate buffer\n", __func__); - goto err_put_desc; + return NULL; } + *(u32*)vaddr = value; - *desc->memset_vaddr = value; - desc->memset = true; + for_each_sg(sgl, sg, sg_len, i) { + dma_addr_t dest = sg_dma_address(sg); + size_t len = sg_dma_len(sg); - desc->lli.saddr = desc->memset_paddr; - desc->lli.daddr = dest; - desc->lli.ctrla = ctrla | xfer_count; - desc->lli.ctrlb = ctrlb; + dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n", + __func__, &dest, len); - desc->txd.cookie = -EBUSY; - desc->len = len; - desc->total_len = len; + if (!is_dma_fill_aligned(chan->device, dest, 0, len)) { + dev_err(chan2dev(chan), "%s: buffer is not aligned\n", + __func__); + goto err_put_desc; + } + + desc = atc_create_memset_desc(chan, paddr, dest, len); + if (!desc) + goto err_put_desc; + + atc_desc_chain(&first, &prev, desc); + + total_len += len; + } + + /* + * Only set the buffer pointers on the last descriptor to + * avoid free'ing while we have our transfer still going + */ + desc->memset_paddr = paddr; + desc->memset_vaddr = vaddr; + desc->memset_buffer = true; + + first->txd.cookie = -EBUSY; + first->total_len = total_len; /* set end-of-link on the descriptor */ set_desc_eol(desc); - desc->txd.flags = flags; + first->txd.flags = flags; - return &desc->txd; + return &first->txd; err_put_desc: - 
atc_desc_put(atchan, desc); + atc_desc_put(atchan, first); return NULL; } - /** * atc_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction * @chan: DMA channel @@ -1345,9 +1439,9 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, unsigned int periods = buf_len / period_len; unsigned int i; - dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n", + dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n", direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE", - buf_addr, + &buf_addr, periods, buf_len, period_len); if (unlikely(!atslave || !buf_len || !period_len)) { @@ -1851,6 +1945,7 @@ static int __init at_dma_probe(struct platform_device *pdev) dma_cap_set(DMA_INTERLEAVE, at91sam9g45_config.cap_mask); dma_cap_set(DMA_MEMCPY, at91sam9g45_config.cap_mask); dma_cap_set(DMA_MEMSET, at91sam9g45_config.cap_mask); + dma_cap_set(DMA_MEMSET_SG, at91sam9g45_config.cap_mask); dma_cap_set(DMA_PRIVATE, at91sam9g45_config.cap_mask); dma_cap_set(DMA_SLAVE, at91sam9g45_config.cap_mask); dma_cap_set(DMA_SG, at91sam9g45_config.cap_mask); @@ -1972,6 +2067,7 @@ static int __init at_dma_probe(struct platform_device *pdev) if (dma_has_cap(DMA_MEMSET, atdma->dma_common.cap_mask)) { atdma->dma_common.device_prep_dma_memset = atc_prep_dma_memset; + atdma->dma_common.device_prep_dma_memset_sg = atc_prep_dma_memset_sg; atdma->dma_common.fill_align = DMAENGINE_ALIGN_4_BYTES; } diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h index c3bebbe89..7f58f0615 100644 --- a/drivers/dma/at_hdmac_regs.h +++ b/drivers/dma/at_hdmac_regs.h @@ -202,7 +202,7 @@ struct at_desc { size_t src_hole; /* Memset temporary buffer */ - bool memset; + bool memset_buffer; dma_addr_t memset_paddr; int *memset_vaddr; }; @@ -385,9 +385,9 @@ static void vdbg_dump_regs(struct at_dma_chan *atchan) {} static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli) { dev_crit(chan2dev(&atchan->chan_common), - " desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n", - lli->saddr, lli->daddr, - lli->ctrla, lli->ctrlb, lli->dscr); + " desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n", + &lli->saddr, &lli->daddr, + lli->ctrla, lli->ctrlb, &lli->dscr); } diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c index dd24375b7..370c661c7 100644 --- a/drivers/dma/at_xdmac.c +++ b/drivers/dma/at_xdmac.c @@ -156,7 +156,7 @@ #define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */ #define AT_XDMAC_CC_WRIP_DONE (0x0 << 23) #define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23) -#define AT_XDMAC_CC_PERID(i) (0x7f & (h) << 24) /* Channel Peripheral Identifier */ +#define AT_XDMAC_CC_PERID(i) (0x7f & (i) << 24) /* Channel Peripheral Identifier */ #define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */ #define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */ #define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */ @@ -920,8 +920,8 @@ at_xdmac_interleaved_queue_desc(struct dma_chan *chan, desc->lld.mbr_cfg = chan_cc; dev_dbg(chan2dev(chan), - "%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", - __func__, desc->lld.mbr_sa, desc->lld.mbr_da, + "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", + __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg); /* Chain lld. 
*/ @@ -938,82 +938,84 @@ at_xdmac_prep_interleaved(struct dma_chan *chan, { struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac_desc *prev = NULL, *first = NULL; - struct data_chunk *chunk, *prev_chunk = NULL; dma_addr_t dst_addr, src_addr; - size_t dst_skip, src_skip, len = 0; - size_t prev_dst_icg = 0, prev_src_icg = 0; + size_t src_skip = 0, dst_skip = 0, len = 0; + struct data_chunk *chunk; int i; - if (!xt || (xt->numf != 1) || (xt->dir != DMA_MEM_TO_MEM)) + if (!xt || !xt->numf || (xt->dir != DMA_MEM_TO_MEM)) return NULL; - dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n", - __func__, xt->src_start, xt->dst_start, xt->numf, + /* + * TODO: Handle the case where we have to repeat a chain of + * descriptors... + */ + if ((xt->numf > 1) && (xt->frame_size > 1)) + return NULL; + + dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n", + __func__, &xt->src_start, &xt->dst_start, xt->numf, xt->frame_size, flags); src_addr = xt->src_start; dst_addr = xt->dst_start; - for (i = 0; i < xt->frame_size; i++) { - struct at_xdmac_desc *desc; - size_t src_icg, dst_icg; + if (xt->numf > 1) { + first = at_xdmac_interleaved_queue_desc(chan, atchan, + NULL, + src_addr, dst_addr, + xt, xt->sgl); - chunk = xt->sgl + i; + /* Length of the block is (BLEN+1) microblocks. */ + for (i = 0; i < xt->numf - 1; i++) + at_xdmac_increment_block_count(chan, first); - dst_icg = dmaengine_get_dst_icg(xt, chunk); - src_icg = dmaengine_get_src_icg(xt, chunk); + dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", + __func__, first, first); + list_add_tail(&first->desc_node, &first->descs_list); + } else { + for (i = 0; i < xt->frame_size; i++) { + size_t src_icg = 0, dst_icg = 0; + struct at_xdmac_desc *desc; - src_skip = chunk->size + src_icg; - dst_skip = chunk->size + dst_icg; + chunk = xt->sgl + i; - dev_dbg(chan2dev(chan), - "%s: chunk size=%d, src icg=%d, dst icg=%d\n", - __func__, chunk->size, src_icg, dst_icg); + dst_icg = dmaengine_get_dst_icg(xt, chunk); + src_icg = dmaengine_get_src_icg(xt, chunk); - /* - * Handle the case where we just have the same - * transfer to setup, we can just increase the - * block number and reuse the same descriptor. 
- */ - if (prev_chunk && prev && - (prev_chunk->size == chunk->size) && - (prev_src_icg == src_icg) && - (prev_dst_icg == dst_icg)) { - dev_dbg(chan2dev(chan), - "%s: same configuration that the previous chunk, merging the descriptors...\n", - __func__); - at_xdmac_increment_block_count(chan, prev); - continue; - } + src_skip = chunk->size + src_icg; + dst_skip = chunk->size + dst_icg; - desc = at_xdmac_interleaved_queue_desc(chan, atchan, - prev, - src_addr, dst_addr, - xt, chunk); - if (!desc) { - list_splice_init(&first->descs_list, - &atchan->free_descs_list); - return NULL; - } + dev_dbg(chan2dev(chan), + "%s: chunk size=%d, src icg=%d, dst icg=%d\n", + __func__, chunk->size, src_icg, dst_icg); + + desc = at_xdmac_interleaved_queue_desc(chan, atchan, + prev, + src_addr, dst_addr, + xt, chunk); + if (!desc) { + list_splice_init(&first->descs_list, + &atchan->free_descs_list); + return NULL; + } - if (!first) - first = desc; + if (!first) + first = desc; - dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", - __func__, desc, first); - list_add_tail(&desc->desc_node, &first->descs_list); + dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n", + __func__, desc, first); + list_add_tail(&desc->desc_node, &first->descs_list); - if (xt->src_sgl) - src_addr += src_skip; + if (xt->src_sgl) + src_addr += src_skip; - if (xt->dst_sgl) - dst_addr += dst_skip; + if (xt->dst_sgl) + dst_addr += dst_skip; - len += chunk->size; - prev_chunk = chunk; - prev_dst_icg = dst_icg; - prev_src_icg = src_icg; - prev = desc; + len += chunk->size; + prev = desc; + } } first->tx_dma_desc.cookie = -EBUSY; @@ -1086,6 +1088,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, /* Check remaining length and change data width if needed. */ dwidth = at_xdmac_align_width(chan, src_addr | dst_addr | xfer_size); + chan_cc &= ~AT_XDMAC_CC_DWIDTH_MASK; chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth); ublen = xfer_size >> dwidth; @@ -1179,8 +1182,8 @@ static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan, desc->lld.mbr_cfg = chan_cc; dev_dbg(chan2dev(chan), - "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", - __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc, + "%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n", + __func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc, desc->lld.mbr_cfg); return desc; @@ -1193,8 +1196,8 @@ at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value, struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan); struct at_xdmac_desc *desc; - dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", - __func__, dest, len, value, flags); + dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", + __func__, &dest, len, value, flags); if (unlikely(!len)) return NULL; @@ -1229,8 +1232,8 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, /* Prepare descriptors. */ for_each_sg(sgl, sg, sg_len, i) { - dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n", - __func__, sg_dma_address(sg), sg_dma_len(sg), + dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n", + __func__, &sg_dma_address(sg), sg_dma_len(sg), value, flags); desc = at_xdmac_memset_create_desc(chan, atchan, sg_dma_address(sg), @@ -1333,7 +1336,7 @@ at_xdmac_prep_dma_memset_sg(struct dma_chan *chan, struct scatterlist *sgl, * since we don't care about the stride anymore. 
*/ if ((i == (sg_len - 1)) && - sg_dma_len(ppsg) == sg_dma_len(psg)) { + sg_dma_len(psg) == sg_dma_len(sg)) { dev_dbg(chan2dev(chan), "%s: desc 0x%p can be merged with desc 0x%p\n", __func__, desc, pdesc); diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c index c92d6a70c..996c4b00d 100644 --- a/drivers/dma/bcm2835-dma.c +++ b/drivers/dma/bcm2835-dma.c @@ -31,6 +31,7 @@ */ #include <linux/dmaengine.h> #include <linux/dma-mapping.h> +#include <linux/dmapool.h> #include <linux/err.h> #include <linux/init.h> #include <linux/interrupt.h> @@ -62,6 +63,11 @@ struct bcm2835_dma_cb { uint32_t pad[2]; }; +struct bcm2835_cb_entry { + struct bcm2835_dma_cb *cb; + dma_addr_t paddr; +}; + struct bcm2835_chan { struct virt_dma_chan vc; struct list_head node; @@ -72,18 +78,18 @@ struct bcm2835_chan { int ch; struct bcm2835_desc *desc; + struct dma_pool *cb_pool; void __iomem *chan_base; int irq_number; }; struct bcm2835_desc { + struct bcm2835_chan *c; struct virt_dma_desc vd; enum dma_transfer_direction dir; - unsigned int control_block_size; - struct bcm2835_dma_cb *control_block_base; - dma_addr_t control_block_base_phys; + struct bcm2835_cb_entry *cb_list; unsigned int frames; size_t size; @@ -143,10 +149,13 @@ static inline struct bcm2835_desc *to_bcm2835_dma_desc( static void bcm2835_dma_desc_free(struct virt_dma_desc *vd) { struct bcm2835_desc *desc = container_of(vd, struct bcm2835_desc, vd); - dma_free_coherent(desc->vd.tx.chan->device->dev, - desc->control_block_size, - desc->control_block_base, - desc->control_block_base_phys); + int i; + + for (i = 0; i < desc->frames; i++) + dma_pool_free(desc->c->cb_pool, desc->cb_list[i].cb, + desc->cb_list[i].paddr); + + kfree(desc->cb_list); kfree(desc); } @@ -199,7 +208,7 @@ static void bcm2835_dma_start_desc(struct bcm2835_chan *c) c->desc = d = to_bcm2835_dma_desc(&vd->tx); - writel(d->control_block_base_phys, c->chan_base + BCM2835_DMA_ADDR); + writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR); writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS); } @@ -232,9 +241,16 @@ static irqreturn_t bcm2835_dma_callback(int irq, void *data) static int bcm2835_dma_alloc_chan_resources(struct dma_chan *chan) { struct bcm2835_chan *c = to_bcm2835_dma_chan(chan); + struct device *dev = c->vc.chan.device->dev; + + dev_dbg(dev, "Allocating DMA channel %d\n", c->ch); - dev_dbg(c->vc.chan.device->dev, - "Allocating DMA channel %d\n", c->ch); + c->cb_pool = dma_pool_create(dev_name(dev), dev, + sizeof(struct bcm2835_dma_cb), 0, 0); + if (!c->cb_pool) { + dev_err(dev, "unable to allocate descriptor pool\n"); + return -ENOMEM; + } return request_irq(c->irq_number, bcm2835_dma_callback, 0, "DMA IRQ", c); @@ -246,6 +262,7 @@ static void bcm2835_dma_free_chan_resources(struct dma_chan *chan) vchan_free_chan_resources(&c->vc); free_irq(c->irq_number, c); + dma_pool_destroy(c->cb_pool); dev_dbg(c->vc.chan.device->dev, "Freeing DMA channel %u\n", c->ch); } @@ -261,8 +278,7 @@ static size_t bcm2835_dma_desc_size_pos(struct bcm2835_desc *d, dma_addr_t addr) size_t size; for (size = i = 0; i < d->frames; i++) { - struct bcm2835_dma_cb *control_block = - &d->control_block_base[i]; + struct bcm2835_dma_cb *control_block = d->cb_list[i].cb; size_t this_size = control_block->length; dma_addr_t dma; @@ -343,6 +359,7 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( dma_addr_t dev_addr; unsigned int es, sync_type; unsigned int frame; + int i; /* Grab configuration */ if (!is_slave_direction(direction)) { @@ -374,27 +391,31 @@ static 
struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( if (!d) return NULL; + d->c = c; d->dir = direction; d->frames = buf_len / period_len; - /* Allocate memory for control blocks */ - d->control_block_size = d->frames * sizeof(struct bcm2835_dma_cb); - d->control_block_base = dma_zalloc_coherent(chan->device->dev, - d->control_block_size, &d->control_block_base_phys, - GFP_NOWAIT); - - if (!d->control_block_base) { + d->cb_list = kcalloc(d->frames, sizeof(*d->cb_list), GFP_KERNEL); + if (!d->cb_list) { kfree(d); return NULL; } + /* Allocate memory for control blocks */ + for (i = 0; i < d->frames; i++) { + struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; + + cb_entry->cb = dma_pool_zalloc(c->cb_pool, GFP_ATOMIC, + &cb_entry->paddr); + if (!cb_entry->cb) + goto error_cb; + } /* * Iterate over all frames, create a control block * for each frame and link them together. */ for (frame = 0; frame < d->frames; frame++) { - struct bcm2835_dma_cb *control_block = - &d->control_block_base[frame]; + struct bcm2835_dma_cb *control_block = d->cb_list[frame].cb; /* Setup adresses */ if (d->dir == DMA_DEV_TO_MEM) { @@ -428,12 +449,21 @@ static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_cyclic( * This DMA engine driver currently only supports cyclic DMA. * Therefore, wrap around at number of frames. */ - control_block->next = d->control_block_base_phys + - sizeof(struct bcm2835_dma_cb) - * ((frame + 1) % d->frames); + control_block->next = d->cb_list[((frame + 1) % d->frames)].paddr; } return vchan_tx_prep(&c->vc, &d->vd, flags); +error_cb: + i--; + for (; i >= 0; i--) { + struct bcm2835_cb_entry *cb_entry = &d->cb_list[i]; + + dma_pool_free(c->cb_pool, cb_entry->cb, cb_entry->paddr); + } + + kfree(d->cb_list); + kfree(d); + return NULL; } static int bcm2835_dma_slave_config(struct dma_chan *chan, diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 09479d4be..3ecec1445 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -1074,11 +1074,9 @@ static void dmaengine_destroy_unmap_pool(void) for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) { struct dmaengine_unmap_pool *p = &unmap_pool[i]; - if (p->pool) - mempool_destroy(p->pool); + mempool_destroy(p->pool); p->pool = NULL; - if (p->cache) - kmem_cache_destroy(p->cache); + kmem_cache_destroy(p->cache); p->cache = NULL; } } diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c index bedce038c..7067b6ddc 100644 --- a/drivers/dma/dw/core.c +++ b/drivers/dma/dw/core.c @@ -163,7 +163,7 @@ static void dwc_initialize(struct dw_dma_chan *dwc) /*----------------------------------------------------------------------*/ -static inline unsigned int dwc_fast_fls(unsigned long long v) +static inline unsigned int dwc_fast_ffs(unsigned long long v) { /* * We can be a lot more clever here, but this should take care @@ -712,7 +712,7 @@ dwc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, dw->data_width[dwc->dst_master]); src_width = dst_width = min_t(unsigned int, data_width, - dwc_fast_fls(src | dest | len)); + dwc_fast_ffs(src | dest | len)); ctllo = DWC_DEFAULT_CTLLO(chan) | DWC_CTLL_DST_WIDTH(dst_width) @@ -791,7 +791,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, switch (direction) { case DMA_MEM_TO_DEV: - reg_width = __fls(sconfig->dst_addr_width); + reg_width = __ffs(sconfig->dst_addr_width); reg = sconfig->dst_addr; ctllo = (DWC_DEFAULT_CTLLO(chan) | DWC_CTLL_DST_WIDTH(reg_width) @@ -811,7 +811,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, len 
= sg_dma_len(sg); mem_width = min_t(unsigned int, - data_width, dwc_fast_fls(mem | len)); + data_width, dwc_fast_ffs(mem | len)); slave_sg_todev_fill_desc: desc = dwc_desc_get(dwc); @@ -848,7 +848,7 @@ slave_sg_todev_fill_desc: } break; case DMA_DEV_TO_MEM: - reg_width = __fls(sconfig->src_addr_width); + reg_width = __ffs(sconfig->src_addr_width); reg = sconfig->src_addr; ctllo = (DWC_DEFAULT_CTLLO(chan) | DWC_CTLL_SRC_WIDTH(reg_width) @@ -868,7 +868,7 @@ slave_sg_todev_fill_desc: len = sg_dma_len(sg); mem_width = min_t(unsigned int, - data_width, dwc_fast_fls(mem | len)); + data_width, dwc_fast_ffs(mem | len)); slave_sg_fromdev_fill_desc: desc = dwc_desc_get(dwc); @@ -1499,9 +1499,8 @@ EXPORT_SYMBOL(dw_dma_cyclic_free); int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) { struct dw_dma *dw; - bool autocfg; + bool autocfg = false; unsigned int dw_params; - unsigned int nr_channels; unsigned int max_blk_size = 0; int err; int i; @@ -1515,33 +1514,42 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) pm_runtime_get_sync(chip->dev); - dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); - autocfg = dw_params >> DW_PARAMS_EN & 0x1; + if (!pdata) { + dw_params = dma_read_byaddr(chip->regs, DW_PARAMS); + dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); - dev_dbg(chip->dev, "DW_PARAMS: 0x%08x\n", dw_params); + autocfg = dw_params >> DW_PARAMS_EN & 1; + if (!autocfg) { + err = -EINVAL; + goto err_pdata; + } - if (!pdata && autocfg) { pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { err = -ENOMEM; goto err_pdata; } + /* Get hardware configuration parameters */ + pdata->nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 7) + 1; + pdata->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; + for (i = 0; i < pdata->nr_masters; i++) { + pdata->data_width[i] = + (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; + } + max_blk_size = dma_readl(dw, MAX_BLK_SIZE); + /* Fill platform data with the default values */ pdata->is_private = true; + pdata->is_memcpy = true; pdata->chan_allocation_order = CHAN_ALLOCATION_ASCENDING; pdata->chan_priority = CHAN_PRIORITY_ASCENDING; - } else if (!pdata || pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { + } else if (pdata->nr_channels > DW_DMA_MAX_NR_CHANNELS) { err = -EINVAL; goto err_pdata; } - if (autocfg) - nr_channels = (dw_params >> DW_PARAMS_NR_CHAN & 0x7) + 1; - else - nr_channels = pdata->nr_channels; - - dw->chan = devm_kcalloc(chip->dev, nr_channels, sizeof(*dw->chan), + dw->chan = devm_kcalloc(chip->dev, pdata->nr_channels, sizeof(*dw->chan), GFP_KERNEL); if (!dw->chan) { err = -ENOMEM; @@ -1549,22 +1557,12 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) } /* Get hardware configuration parameters */ - if (autocfg) { - max_blk_size = dma_readl(dw, MAX_BLK_SIZE); - - dw->nr_masters = (dw_params >> DW_PARAMS_NR_MASTER & 3) + 1; - for (i = 0; i < dw->nr_masters; i++) { - dw->data_width[i] = - (dw_params >> DW_PARAMS_DATA_WIDTH(i) & 3) + 2; - } - } else { - dw->nr_masters = pdata->nr_masters; - for (i = 0; i < dw->nr_masters; i++) - dw->data_width[i] = pdata->data_width[i]; - } + dw->nr_masters = pdata->nr_masters; + for (i = 0; i < dw->nr_masters; i++) + dw->data_width[i] = pdata->data_width[i]; /* Calculate all channel mask before DMA setup */ - dw->all_chan_mask = (1 << nr_channels) - 1; + dw->all_chan_mask = (1 << pdata->nr_channels) - 1; /* Force dma off, just in case */ dw_dma_off(dw); @@ -1589,7 +1587,7 @@ int dw_dma_probe(struct 
dw_dma_chip *chip, struct dw_dma_platform_data *pdata) goto err_pdata; INIT_LIST_HEAD(&dw->dma.channels); - for (i = 0; i < nr_channels; i++) { + for (i = 0; i < pdata->nr_channels; i++) { struct dw_dma_chan *dwc = &dw->chan[i]; dwc->chan.device = &dw->dma; @@ -1602,7 +1600,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) /* 7 is highest priority & 0 is lowest. */ if (pdata->chan_priority == CHAN_PRIORITY_ASCENDING) - dwc->priority = nr_channels - i - 1; + dwc->priority = pdata->nr_channels - i - 1; else dwc->priority = i; @@ -1656,10 +1654,13 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) dma_writel(dw, CLEAR.DST_TRAN, dw->all_chan_mask); dma_writel(dw, CLEAR.ERROR, dw->all_chan_mask); - dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); + /* Set capabilities */ dma_cap_set(DMA_SLAVE, dw->dma.cap_mask); if (pdata->is_private) dma_cap_set(DMA_PRIVATE, dw->dma.cap_mask); + if (pdata->is_memcpy) + dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask); + dw->dma.dev = chip->dev; dw->dma.device_alloc_chan_resources = dwc_alloc_chan_resources; dw->dma.device_free_chan_resources = dwc_free_chan_resources; @@ -1687,7 +1688,7 @@ int dw_dma_probe(struct dw_dma_chip *chip, struct dw_dma_platform_data *pdata) goto err_dma_register; dev_info(chip->dev, "DesignWare DMA Controller, %d channels\n", - nr_channels); + pdata->nr_channels); pm_runtime_put_sync_suspend(chip->dev); diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c index b144706b3..4c30fdd09 100644 --- a/drivers/dma/dw/pci.c +++ b/drivers/dma/dw/pci.c @@ -15,12 +15,6 @@ #include "internal.h" -static struct dw_dma_platform_data dw_pci_pdata = { - .is_private = 1, - .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, - .chan_priority = CHAN_PRIORITY_ASCENDING, -}; - static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid) { struct dw_dma_chip *chip; @@ -101,19 +95,19 @@ static const struct dev_pm_ops dw_pci_dev_pm_ops = { static const struct pci_device_id dw_pci_id_table[] = { /* Medfield */ - { PCI_VDEVICE(INTEL, 0x0827), (kernel_ulong_t)&dw_pci_pdata }, - { PCI_VDEVICE(INTEL, 0x0830), (kernel_ulong_t)&dw_pci_pdata }, + { PCI_VDEVICE(INTEL, 0x0827) }, + { PCI_VDEVICE(INTEL, 0x0830) }, /* BayTrail */ - { PCI_VDEVICE(INTEL, 0x0f06), (kernel_ulong_t)&dw_pci_pdata }, - { PCI_VDEVICE(INTEL, 0x0f40), (kernel_ulong_t)&dw_pci_pdata }, + { PCI_VDEVICE(INTEL, 0x0f06) }, + { PCI_VDEVICE(INTEL, 0x0f40) }, /* Braswell */ - { PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_pdata }, - { PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_pdata }, + { PCI_VDEVICE(INTEL, 0x2286) }, + { PCI_VDEVICE(INTEL, 0x22c0) }, /* Haswell */ - { PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_pdata }, + { PCI_VDEVICE(INTEL, 0x9c60) }, { } }; MODULE_DEVICE_TABLE(pci, dw_pci_id_table); diff --git a/drivers/dma/dw/platform.c b/drivers/dma/dw/platform.c index b2c3ae071..68a481575 100644 --- a/drivers/dma/dw/platform.c +++ b/drivers/dma/dw/platform.c @@ -155,6 +155,7 @@ static int dw_probe(struct platform_device *pdev) struct dw_dma_chip *chip; struct device *dev = &pdev->dev; struct resource *mem; + const struct acpi_device_id *id; struct dw_dma_platform_data *pdata; int err; @@ -178,6 +179,11 @@ static int dw_probe(struct platform_device *pdev) pdata = dev_get_platdata(dev); if (!pdata) pdata = dw_dma_parse_dt(pdev); + if (!pdata && has_acpi_companion(dev)) { + id = acpi_match_device(dev->driver->acpi_match_table, dev); + if (id) + pdata = (struct dw_dma_platform_data *)id->driver_data; + } 
chip->dev = dev; @@ -246,8 +252,17 @@ MODULE_DEVICE_TABLE(of, dw_dma_of_id_table); #endif #ifdef CONFIG_ACPI +static struct dw_dma_platform_data dw_dma_acpi_pdata = { + .nr_channels = 8, + .is_private = true, + .chan_allocation_order = CHAN_ALLOCATION_ASCENDING, + .chan_priority = CHAN_PRIORITY_ASCENDING, + .block_size = 4095, + .nr_masters = 2, +}; + static const struct acpi_device_id dw_dma_acpi_id_table[] = { - { "INTL9C60", 0 }, + { "INTL9C60", (kernel_ulong_t)&dw_dma_acpi_pdata }, { } }; MODULE_DEVICE_TABLE(acpi, dw_dma_acpi_id_table); diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c index 3e5d4f193..16fe773fb 100644 --- a/drivers/dma/edma.c +++ b/drivers/dma/edma.c @@ -25,28 +25,93 @@ #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/of.h> +#include <linux/of_dma.h> +#include <linux/of_irq.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/pm_runtime.h> #include <linux/platform_data/edma.h> #include "dmaengine.h" #include "virt-dma.h" -/* - * This will go away when the private EDMA API is folded - * into this driver and the platform device(s) are - * instantiated in the arch code. We can only get away - * with this simplification because DA8XX may not be built - * in the same kernel image with other DaVinci parts. This - * avoids having to sprinkle dmaengine driver platform devices - * and data throughout all the existing board files. - */ -#ifdef CONFIG_ARCH_DAVINCI_DA8XX -#define EDMA_CTLRS 2 -#define EDMA_CHANS 32 -#else -#define EDMA_CTLRS 1 -#define EDMA_CHANS 64 -#endif /* CONFIG_ARCH_DAVINCI_DA8XX */ +/* Offsets matching "struct edmacc_param" */ +#define PARM_OPT 0x00 +#define PARM_SRC 0x04 +#define PARM_A_B_CNT 0x08 +#define PARM_DST 0x0c +#define PARM_SRC_DST_BIDX 0x10 +#define PARM_LINK_BCNTRLD 0x14 +#define PARM_SRC_DST_CIDX 0x18 +#define PARM_CCNT 0x1c + +#define PARM_SIZE 0x20 + +/* Offsets for EDMA CC global channel registers and their shadows */ +#define SH_ER 0x00 /* 64 bits */ +#define SH_ECR 0x08 /* 64 bits */ +#define SH_ESR 0x10 /* 64 bits */ +#define SH_CER 0x18 /* 64 bits */ +#define SH_EER 0x20 /* 64 bits */ +#define SH_EECR 0x28 /* 64 bits */ +#define SH_EESR 0x30 /* 64 bits */ +#define SH_SER 0x38 /* 64 bits */ +#define SH_SECR 0x40 /* 64 bits */ +#define SH_IER 0x50 /* 64 bits */ +#define SH_IECR 0x58 /* 64 bits */ +#define SH_IESR 0x60 /* 64 bits */ +#define SH_IPR 0x68 /* 64 bits */ +#define SH_ICR 0x70 /* 64 bits */ +#define SH_IEVAL 0x78 +#define SH_QER 0x80 +#define SH_QEER 0x84 +#define SH_QEECR 0x88 +#define SH_QEESR 0x8c +#define SH_QSER 0x90 +#define SH_QSECR 0x94 +#define SH_SIZE 0x200 + +/* Offsets for EDMA CC global registers */ +#define EDMA_REV 0x0000 +#define EDMA_CCCFG 0x0004 +#define EDMA_QCHMAP 0x0200 /* 8 registers */ +#define EDMA_DMAQNUM 0x0240 /* 8 registers (4 on OMAP-L1xx) */ +#define EDMA_QDMAQNUM 0x0260 +#define EDMA_QUETCMAP 0x0280 +#define EDMA_QUEPRI 0x0284 +#define EDMA_EMR 0x0300 /* 64 bits */ +#define EDMA_EMCR 0x0308 /* 64 bits */ +#define EDMA_QEMR 0x0310 +#define EDMA_QEMCR 0x0314 +#define EDMA_CCERR 0x0318 +#define EDMA_CCERRCLR 0x031c +#define EDMA_EEVAL 0x0320 +#define EDMA_DRAE 0x0340 /* 4 x 64 bits*/ +#define EDMA_QRAE 0x0380 /* 4 registers */ +#define EDMA_QUEEVTENTRY 0x0400 /* 2 x 16 registers */ +#define EDMA_QSTAT 0x0600 /* 2 registers */ +#define EDMA_QWMTHRA 0x0620 +#define EDMA_QWMTHRB 0x0624 +#define EDMA_CCSTAT 0x0640 + +#define EDMA_M 0x1000 /* global channel registers */ +#define EDMA_ECR 0x1008 +#define EDMA_ECRH 0x100C +#define EDMA_SHADOW0 0x2000 /* 4 
shadow regions */ +#define EDMA_PARM 0x4000 /* PaRAM entries */ + +#define PARM_OFFSET(param_no) (EDMA_PARM + ((param_no) << 5)) + +#define EDMA_DCHMAP 0x0100 /* 64 registers */ + +/* CCCFG register */ +#define GET_NUM_DMACH(x) (x & 0x7) /* bits 0-2 */ +#define GET_NUM_QDMACH(x) ((x & 0x70) >> 4) /* bits 4-6 */ +#define GET_NUM_PAENTRY(x) ((x & 0x7000) >> 12) /* bits 12-14 */ +#define GET_NUM_EVQUE(x) ((x & 0x70000) >> 16) /* bits 16-18 */ +#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */ +#define CHMAP_EXIST BIT(24) /* * Max of 20 segments per channel to conserve PaRAM slots @@ -59,6 +124,37 @@ #define EDMA_MAX_SLOTS MAX_NR_SG #define EDMA_DESCRIPTORS 16 +#define EDMA_CHANNEL_ANY -1 /* for edma_alloc_channel() */ +#define EDMA_SLOT_ANY -1 /* for edma_alloc_slot() */ +#define EDMA_CONT_PARAMS_ANY 1001 +#define EDMA_CONT_PARAMS_FIXED_EXACT 1002 +#define EDMA_CONT_PARAMS_FIXED_NOT_EXACT 1003 + +/* PaRAM slots are laid out like this */ +struct edmacc_param { + u32 opt; + u32 src; + u32 a_b_cnt; + u32 dst; + u32 src_dst_bidx; + u32 link_bcntrld; + u32 src_dst_cidx; + u32 ccnt; +} __packed; + +/* fields in edmacc_param.opt */ +#define SAM BIT(0) +#define DAM BIT(1) +#define SYNCDIM BIT(2) +#define STATIC BIT(3) +#define EDMA_FWID (0x07 << 8) +#define TCCMODE BIT(11) +#define EDMA_TCC(t) ((t) << 12) +#define TCINTEN BIT(20) +#define ITCINTEN BIT(21) +#define TCCHEN BIT(22) +#define ITCCHEN BIT(23) + struct edma_pset { u32 len; dma_addr_t addr; @@ -105,26 +201,524 @@ struct edma_desc { struct edma_cc; +struct edma_tc { + struct device_node *node; + u16 id; +}; + struct edma_chan { struct virt_dma_chan vchan; struct list_head node; struct edma_desc *edesc; struct edma_cc *ecc; + struct edma_tc *tc; int ch_num; bool alloced; + bool hw_triggered; int slot[EDMA_MAX_SLOTS]; int missed; struct dma_slave_config cfg; }; struct edma_cc { - int ctlr; + struct device *dev; + struct edma_soc_info *info; + void __iomem *base; + int id; + bool legacy_mode; + + /* eDMA3 resource information */ + unsigned num_channels; + unsigned num_qchannels; + unsigned num_region; + unsigned num_slots; + unsigned num_tc; + bool chmap_exist; + enum dma_event_q default_queue; + + /* + * The slot_inuse bit for each PaRAM slot is clear unless the slot is + * in use by Linux or if it is allocated to be used by DSP. 
+ */ + unsigned long *slot_inuse; + struct dma_device dma_slave; - struct edma_chan slave_chans[EDMA_CHANS]; - int num_slave_chans; + struct dma_device *dma_memcpy; + struct edma_chan *slave_chans; + struct edma_tc *tc_list; int dummy_slot; }; +/* dummy param set used to (re)initialize parameter RAM slots */ +static const struct edmacc_param dummy_paramset = { + .link_bcntrld = 0xffff, + .ccnt = 1, +}; + +#define EDMA_BINDING_LEGACY 0 +#define EDMA_BINDING_TPCC 1 +static const struct of_device_id edma_of_ids[] = { + { + .compatible = "ti,edma3", + .data = (void *)EDMA_BINDING_LEGACY, + }, + { + .compatible = "ti,edma3-tpcc", + .data = (void *)EDMA_BINDING_TPCC, + }, + {} +}; + +static const struct of_device_id edma_tptc_of_ids[] = { + { .compatible = "ti,edma3-tptc", }, + {} +}; + +static inline unsigned int edma_read(struct edma_cc *ecc, int offset) +{ + return (unsigned int)__raw_readl(ecc->base + offset); +} + +static inline void edma_write(struct edma_cc *ecc, int offset, int val) +{ + __raw_writel(val, ecc->base + offset); +} + +static inline void edma_modify(struct edma_cc *ecc, int offset, unsigned and, + unsigned or) +{ + unsigned val = edma_read(ecc, offset); + + val &= and; + val |= or; + edma_write(ecc, offset, val); +} + +static inline void edma_and(struct edma_cc *ecc, int offset, unsigned and) +{ + unsigned val = edma_read(ecc, offset); + + val &= and; + edma_write(ecc, offset, val); +} + +static inline void edma_or(struct edma_cc *ecc, int offset, unsigned or) +{ + unsigned val = edma_read(ecc, offset); + + val |= or; + edma_write(ecc, offset, val); +} + +static inline unsigned int edma_read_array(struct edma_cc *ecc, int offset, + int i) +{ + return edma_read(ecc, offset + (i << 2)); +} + +static inline void edma_write_array(struct edma_cc *ecc, int offset, int i, + unsigned val) +{ + edma_write(ecc, offset + (i << 2), val); +} + +static inline void edma_modify_array(struct edma_cc *ecc, int offset, int i, + unsigned and, unsigned or) +{ + edma_modify(ecc, offset + (i << 2), and, or); +} + +static inline void edma_or_array(struct edma_cc *ecc, int offset, int i, + unsigned or) +{ + edma_or(ecc, offset + (i << 2), or); +} + +static inline void edma_or_array2(struct edma_cc *ecc, int offset, int i, int j, + unsigned or) +{ + edma_or(ecc, offset + ((i * 2 + j) << 2), or); +} + +static inline void edma_write_array2(struct edma_cc *ecc, int offset, int i, + int j, unsigned val) +{ + edma_write(ecc, offset + ((i * 2 + j) << 2), val); +} + +static inline unsigned int edma_shadow0_read(struct edma_cc *ecc, int offset) +{ + return edma_read(ecc, EDMA_SHADOW0 + offset); +} + +static inline unsigned int edma_shadow0_read_array(struct edma_cc *ecc, + int offset, int i) +{ + return edma_read(ecc, EDMA_SHADOW0 + offset + (i << 2)); +} + +static inline void edma_shadow0_write(struct edma_cc *ecc, int offset, + unsigned val) +{ + edma_write(ecc, EDMA_SHADOW0 + offset, val); +} + +static inline void edma_shadow0_write_array(struct edma_cc *ecc, int offset, + int i, unsigned val) +{ + edma_write(ecc, EDMA_SHADOW0 + offset + (i << 2), val); +} + +static inline unsigned int edma_param_read(struct edma_cc *ecc, int offset, + int param_no) +{ + return edma_read(ecc, EDMA_PARM + offset + (param_no << 5)); +} + +static inline void edma_param_write(struct edma_cc *ecc, int offset, + int param_no, unsigned val) +{ + edma_write(ecc, EDMA_PARM + offset + (param_no << 5), val); +} + +static inline void edma_param_modify(struct edma_cc *ecc, int offset, + int param_no, unsigned and, unsigned or) +{ + 
edma_modify(ecc, EDMA_PARM + offset + (param_no << 5), and, or); +} + +static inline void edma_param_and(struct edma_cc *ecc, int offset, int param_no, + unsigned and) +{ + edma_and(ecc, EDMA_PARM + offset + (param_no << 5), and); +} + +static inline void edma_param_or(struct edma_cc *ecc, int offset, int param_no, + unsigned or) +{ + edma_or(ecc, EDMA_PARM + offset + (param_no << 5), or); +} + +static inline void set_bits(int offset, int len, unsigned long *p) +{ + for (; len > 0; len--) + set_bit(offset + (len - 1), p); +} + +static inline void clear_bits(int offset, int len, unsigned long *p) +{ + for (; len > 0; len--) + clear_bit(offset + (len - 1), p); +} + +static void edma_assign_priority_to_queue(struct edma_cc *ecc, int queue_no, + int priority) +{ + int bit = queue_no * 4; + + edma_modify(ecc, EDMA_QUEPRI, ~(0x7 << bit), ((priority & 0x7) << bit)); +} + +static void edma_set_chmap(struct edma_chan *echan, int slot) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + + if (ecc->chmap_exist) { + slot = EDMA_CHAN_SLOT(slot); + edma_write_array(ecc, EDMA_DCHMAP, channel, (slot << 5)); + } +} + +static void edma_setup_interrupt(struct edma_chan *echan, bool enable) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + + if (enable) { + edma_shadow0_write_array(ecc, SH_ICR, channel >> 5, + BIT(channel & 0x1f)); + edma_shadow0_write_array(ecc, SH_IESR, channel >> 5, + BIT(channel & 0x1f)); + } else { + edma_shadow0_write_array(ecc, SH_IECR, channel >> 5, + BIT(channel & 0x1f)); + } +} + +/* + * paRAM slot management functions + */ +static void edma_write_slot(struct edma_cc *ecc, unsigned slot, + const struct edmacc_param *param) +{ + slot = EDMA_CHAN_SLOT(slot); + if (slot >= ecc->num_slots) + return; + memcpy_toio(ecc->base + PARM_OFFSET(slot), param, PARM_SIZE); +} + +static void edma_read_slot(struct edma_cc *ecc, unsigned slot, + struct edmacc_param *param) +{ + slot = EDMA_CHAN_SLOT(slot); + if (slot >= ecc->num_slots) + return; + memcpy_fromio(param, ecc->base + PARM_OFFSET(slot), PARM_SIZE); +} + +/** + * edma_alloc_slot - allocate DMA parameter RAM + * @ecc: pointer to edma_cc struct + * @slot: specific slot to allocate; negative for "any unused slot" + * + * This allocates a parameter RAM slot, initializing it to hold a + * dummy transfer. Slots allocated using this routine have not been + * mapped to a hardware DMA channel, and will normally be used by + * linking to them from a slot associated with a DMA channel. + * + * Normal use is to pass EDMA_SLOT_ANY as the @slot, but specific + * slots may be allocated on behalf of DSP firmware. + * + * Returns the number of the slot, else negative errno. + */ +static int edma_alloc_slot(struct edma_cc *ecc, int slot) +{ + if (slot > 0) { + slot = EDMA_CHAN_SLOT(slot); + /* Requesting entry paRAM slot for a HW triggered channel. 
*/ + if (ecc->chmap_exist && slot < ecc->num_channels) + slot = EDMA_SLOT_ANY; + } + + if (slot < 0) { + if (ecc->chmap_exist) + slot = 0; + else + slot = ecc->num_channels; + for (;;) { + slot = find_next_zero_bit(ecc->slot_inuse, + ecc->num_slots, + slot); + if (slot == ecc->num_slots) + return -ENOMEM; + if (!test_and_set_bit(slot, ecc->slot_inuse)) + break; + } + } else if (slot >= ecc->num_slots) { + return -EINVAL; + } else if (test_and_set_bit(slot, ecc->slot_inuse)) { + return -EBUSY; + } + + edma_write_slot(ecc, slot, &dummy_paramset); + + return EDMA_CTLR_CHAN(ecc->id, slot); +} + +static void edma_free_slot(struct edma_cc *ecc, unsigned slot) +{ + slot = EDMA_CHAN_SLOT(slot); + if (slot >= ecc->num_slots) + return; + + edma_write_slot(ecc, slot, &dummy_paramset); + clear_bit(slot, ecc->slot_inuse); +} + +/** + * edma_link - link one parameter RAM slot to another + * @ecc: pointer to edma_cc struct + * @from: parameter RAM slot originating the link + * @to: parameter RAM slot which is the link target + * + * The originating slot should not be part of any active DMA transfer. + */ +static void edma_link(struct edma_cc *ecc, unsigned from, unsigned to) +{ + if (unlikely(EDMA_CTLR(from) != EDMA_CTLR(to))) + dev_warn(ecc->dev, "Ignoring eDMA instance for linking\n"); + + from = EDMA_CHAN_SLOT(from); + to = EDMA_CHAN_SLOT(to); + if (from >= ecc->num_slots || to >= ecc->num_slots) + return; + + edma_param_modify(ecc, PARM_LINK_BCNTRLD, from, 0xffff0000, + PARM_OFFSET(to)); +} + +/** + * edma_get_position - returns the current transfer point + * @ecc: pointer to edma_cc struct + * @slot: parameter RAM slot being examined + * @dst: true selects the dest position, false the source + * + * Returns the position of the current active slot + */ +static dma_addr_t edma_get_position(struct edma_cc *ecc, unsigned slot, + bool dst) +{ + u32 offs; + + slot = EDMA_CHAN_SLOT(slot); + offs = PARM_OFFSET(slot); + offs += dst ? PARM_DST : PARM_SRC; + + return edma_read(ecc, offs); +} + +/* + * Channels with event associations will be triggered by their hardware + * events, and channels without such associations will be triggered by + * software. (At this writing there is no interface for using software + * triggers except with channels that don't support hardware triggers.) 
+ */ +static void edma_start(struct edma_chan *echan) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + int j = (channel >> 5); + unsigned int mask = BIT(channel & 0x1f); + + if (!echan->hw_triggered) { + /* EDMA channels without event association */ + dev_dbg(ecc->dev, "ESR%d %08x\n", j, + edma_shadow0_read_array(ecc, SH_ESR, j)); + edma_shadow0_write_array(ecc, SH_ESR, j, mask); + } else { + /* EDMA channel with event association */ + dev_dbg(ecc->dev, "ER%d %08x\n", j, + edma_shadow0_read_array(ecc, SH_ER, j)); + /* Clear any pending event or error */ + edma_write_array(ecc, EDMA_ECR, j, mask); + edma_write_array(ecc, EDMA_EMCR, j, mask); + /* Clear any SER */ + edma_shadow0_write_array(ecc, SH_SECR, j, mask); + edma_shadow0_write_array(ecc, SH_EESR, j, mask); + dev_dbg(ecc->dev, "EER%d %08x\n", j, + edma_shadow0_read_array(ecc, SH_EER, j)); + } +} + +static void edma_stop(struct edma_chan *echan) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + int j = (channel >> 5); + unsigned int mask = BIT(channel & 0x1f); + + edma_shadow0_write_array(ecc, SH_EECR, j, mask); + edma_shadow0_write_array(ecc, SH_ECR, j, mask); + edma_shadow0_write_array(ecc, SH_SECR, j, mask); + edma_write_array(ecc, EDMA_EMCR, j, mask); + + /* clear possibly pending completion interrupt */ + edma_shadow0_write_array(ecc, SH_ICR, j, mask); + + dev_dbg(ecc->dev, "EER%d %08x\n", j, + edma_shadow0_read_array(ecc, SH_EER, j)); + + /* REVISIT: consider guarding against inappropriate event + * chaining by overwriting with dummy_paramset. + */ +} + +/* + * Temporarily disable EDMA hardware events on the specified channel, + * preventing them from triggering new transfers + */ +static void edma_pause(struct edma_chan *echan) +{ + int channel = EDMA_CHAN_SLOT(echan->ch_num); + unsigned int mask = BIT(channel & 0x1f); + + edma_shadow0_write_array(echan->ecc, SH_EECR, channel >> 5, mask); +} + +/* Re-enable EDMA hardware events on the specified channel. 
*/ +static void edma_resume(struct edma_chan *echan) +{ + int channel = EDMA_CHAN_SLOT(echan->ch_num); + unsigned int mask = BIT(channel & 0x1f); + + edma_shadow0_write_array(echan->ecc, SH_EESR, channel >> 5, mask); +} + +static void edma_trigger_channel(struct edma_chan *echan) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + unsigned int mask = BIT(channel & 0x1f); + + edma_shadow0_write_array(ecc, SH_ESR, (channel >> 5), mask); + + dev_dbg(ecc->dev, "ESR%d %08x\n", (channel >> 5), + edma_shadow0_read_array(ecc, SH_ESR, (channel >> 5))); +} + +static void edma_clean_channel(struct edma_chan *echan) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + int j = (channel >> 5); + unsigned int mask = BIT(channel & 0x1f); + + dev_dbg(ecc->dev, "EMR%d %08x\n", j, edma_read_array(ecc, EDMA_EMR, j)); + edma_shadow0_write_array(ecc, SH_ECR, j, mask); + /* Clear the corresponding EMR bits */ + edma_write_array(ecc, EDMA_EMCR, j, mask); + /* Clear any SER */ + edma_shadow0_write_array(ecc, SH_SECR, j, mask); + edma_write(ecc, EDMA_CCERRCLR, BIT(16) | BIT(1) | BIT(0)); +} + +/* Move channel to a specific event queue */ +static void edma_assign_channel_eventq(struct edma_chan *echan, + enum dma_event_q eventq_no) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + int bit = (channel & 0x7) * 4; + + /* default to low priority queue */ + if (eventq_no == EVENTQ_DEFAULT) + eventq_no = ecc->default_queue; + if (eventq_no >= ecc->num_tc) + return; + + eventq_no &= 7; + edma_modify_array(ecc, EDMA_DMAQNUM, (channel >> 3), ~(0x7 << bit), + eventq_no << bit); +} + +static int edma_alloc_channel(struct edma_chan *echan, + enum dma_event_q eventq_no) +{ + struct edma_cc *ecc = echan->ecc; + int channel = EDMA_CHAN_SLOT(echan->ch_num); + + /* ensure access through shadow region 0 */ + edma_or_array2(ecc, EDMA_DRAE, 0, channel >> 5, BIT(channel & 0x1f)); + + /* ensure no events are pending */ + edma_stop(echan); + + edma_setup_interrupt(echan, true); + + edma_assign_channel_eventq(echan, eventq_no); + + return 0; +} + +static void edma_free_channel(struct edma_chan *echan) +{ + /* ensure no events are pending */ + edma_stop(echan); + /* REVISIT should probably take out of shadow region 0 */ + edma_setup_interrupt(echan, false); +} + static inline struct edma_cc *to_edma_cc(struct dma_device *d) { return container_of(d, struct edma_cc, dma_slave); @@ -135,8 +729,7 @@ static inline struct edma_chan *to_edma_chan(struct dma_chan *c) return container_of(c, struct edma_chan, vchan.chan); } -static inline struct edma_desc -*to_edma_desc(struct dma_async_tx_descriptor *tx) +static inline struct edma_desc *to_edma_desc(struct dma_async_tx_descriptor *tx) { return container_of(tx, struct edma_desc, vdesc.tx); } @@ -149,20 +742,17 @@ static void edma_desc_free(struct virt_dma_desc *vdesc) /* Dispatch a queued descriptor to the controller (caller holds lock) */ static void edma_execute(struct edma_chan *echan) { + struct edma_cc *ecc = echan->ecc; struct virt_dma_desc *vdesc; struct edma_desc *edesc; struct device *dev = echan->vchan.chan.device->dev; int i, j, left, nslots; - /* If either we processed all psets or we're still not started */ - if (!echan->edesc || - echan->edesc->pset_nr == echan->edesc->processed) { - /* Get next vdesc */ + if (!echan->edesc) { + /* Setup is needed for the first transfer */ vdesc = vchan_next_desc(&echan->vchan); - if (!vdesc) { - echan->edesc = NULL; + if (!vdesc) return; - } 
list_del(&vdesc->node); echan->edesc = to_edma_desc(&vdesc->tx); } @@ -177,32 +767,32 @@ static void edma_execute(struct edma_chan *echan) /* Write descriptor PaRAM set(s) */ for (i = 0; i < nslots; i++) { j = i + edesc->processed; - edma_write_slot(echan->slot[i], &edesc->pset[j].param); + edma_write_slot(ecc, echan->slot[i], &edesc->pset[j].param); edesc->sg_len += edesc->pset[j].len; - dev_vdbg(echan->vchan.chan.device->dev, - "\n pset[%d]:\n" - " chnum\t%d\n" - " slot\t%d\n" - " opt\t%08x\n" - " src\t%08x\n" - " dst\t%08x\n" - " abcnt\t%08x\n" - " ccnt\t%08x\n" - " bidx\t%08x\n" - " cidx\t%08x\n" - " lkrld\t%08x\n", - j, echan->ch_num, echan->slot[i], - edesc->pset[j].param.opt, - edesc->pset[j].param.src, - edesc->pset[j].param.dst, - edesc->pset[j].param.a_b_cnt, - edesc->pset[j].param.ccnt, - edesc->pset[j].param.src_dst_bidx, - edesc->pset[j].param.src_dst_cidx, - edesc->pset[j].param.link_bcntrld); + dev_vdbg(dev, + "\n pset[%d]:\n" + " chnum\t%d\n" + " slot\t%d\n" + " opt\t%08x\n" + " src\t%08x\n" + " dst\t%08x\n" + " abcnt\t%08x\n" + " ccnt\t%08x\n" + " bidx\t%08x\n" + " cidx\t%08x\n" + " lkrld\t%08x\n", + j, echan->ch_num, echan->slot[i], + edesc->pset[j].param.opt, + edesc->pset[j].param.src, + edesc->pset[j].param.dst, + edesc->pset[j].param.a_b_cnt, + edesc->pset[j].param.ccnt, + edesc->pset[j].param.src_dst_bidx, + edesc->pset[j].param.src_dst_cidx, + edesc->pset[j].param.link_bcntrld); /* Link to the previous slot if not the last set */ if (i != (nslots - 1)) - edma_link(echan->slot[i], echan->slot[i+1]); + edma_link(ecc, echan->slot[i], echan->slot[i + 1]); } edesc->processed += nslots; @@ -214,34 +804,32 @@ static void edma_execute(struct edma_chan *echan) */ if (edesc->processed == edesc->pset_nr) { if (edesc->cyclic) - edma_link(echan->slot[nslots-1], echan->slot[1]); + edma_link(ecc, echan->slot[nslots - 1], echan->slot[1]); else - edma_link(echan->slot[nslots-1], + edma_link(ecc, echan->slot[nslots - 1], echan->ecc->dummy_slot); } - if (edesc->processed <= MAX_NR_SG) { + if (echan->missed) { + /* + * This happens due to setup times between intermediate + * transfers in long SG lists which have to be broken up into + * transfers of MAX_NR_SG + */ + dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); + edma_clean_channel(echan); + edma_stop(echan); + edma_start(echan); + edma_trigger_channel(echan); + echan->missed = 0; + } else if (edesc->processed <= MAX_NR_SG) { dev_dbg(dev, "first transfer starting on channel %d\n", echan->ch_num); - edma_start(echan->ch_num); + edma_start(echan); } else { dev_dbg(dev, "chan: %d: completed %d elements, resuming\n", echan->ch_num, edesc->processed); - edma_resume(echan->ch_num); - } - - /* - * This happens due to setup times between intermediate transfers - * in long SG lists which have to be broken up into transfers of - * MAX_NR_SG - */ - if (echan->missed) { - dev_dbg(dev, "missed event on channel %d\n", echan->ch_num); - edma_clean_channel(echan->ch_num); - edma_stop(echan->ch_num); - edma_start(echan->ch_num); - edma_trigger_channel(echan->ch_num); - echan->missed = 0; + edma_resume(echan); } } @@ -259,20 +847,16 @@ static int edma_terminate_all(struct dma_chan *chan) * echan->edesc is NULL and exit.) 
*/ if (echan->edesc) { - int cyclic = echan->edesc->cyclic; - + edma_stop(echan); + /* Move the cyclic channel back to default queue */ + if (!echan->tc && echan->edesc->cyclic) + edma_assign_channel_eventq(echan, EVENTQ_DEFAULT); /* * free the running request descriptor * since it is not in any of the vdesc lists */ edma_desc_free(&echan->edesc->vdesc); - echan->edesc = NULL; - edma_stop(echan->ch_num); - /* Move the cyclic channel back to default queue */ - if (cyclic) - edma_assign_channel_eventq(echan->ch_num, - EVENTQ_DEFAULT); } vchan_get_all_descriptors(&echan->vchan, &head); @@ -303,7 +887,7 @@ static int edma_dma_pause(struct dma_chan *chan) if (!echan->edesc) return -EINVAL; - edma_pause(echan->ch_num); + edma_pause(echan); return 0; } @@ -311,7 +895,7 @@ static int edma_dma_resume(struct dma_chan *chan) { struct edma_chan *echan = to_edma_chan(chan); - edma_resume(echan->ch_num); + edma_resume(echan); return 0; } @@ -327,19 +911,17 @@ static int edma_dma_resume(struct dma_chan *chan) * @direction: Direction of the transfer */ static int edma_config_pset(struct dma_chan *chan, struct edma_pset *epset, - dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, - enum dma_slave_buswidth dev_width, unsigned int dma_length, - enum dma_transfer_direction direction) + dma_addr_t src_addr, dma_addr_t dst_addr, u32 burst, + unsigned int acnt, unsigned int dma_length, + enum dma_transfer_direction direction) { struct edma_chan *echan = to_edma_chan(chan); struct device *dev = chan->device->dev; struct edmacc_param *param = &epset->param; - int acnt, bcnt, ccnt, cidx; + int bcnt, ccnt, cidx; int src_bidx, dst_bidx, src_cidx, dst_cidx; int absync; - acnt = dev_width; - /* src/dst_maxburst == 0 is the same case as src/dst_maxburst == 1 */ if (!burst) burst = 1; @@ -475,8 +1057,8 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( return NULL; } - edesc = kzalloc(sizeof(*edesc) + sg_len * - sizeof(edesc->pset[0]), GFP_ATOMIC); + edesc = kzalloc(sizeof(*edesc) + sg_len * sizeof(edesc->pset[0]), + GFP_ATOMIC); if (!edesc) { dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); return NULL; @@ -493,8 +1075,7 @@ static struct dma_async_tx_descriptor *edma_prep_slave_sg( for (i = 0; i < nslots; i++) { if (echan->slot[i] < 0) { echan->slot[i] = - edma_alloc_slot(EDMA_CTLR(echan->ch_num), - EDMA_SLOT_ANY); + edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); if (echan->slot[i] < 0) { kfree(edesc); dev_err(dev, "%s: Failed to allocate slot\n", @@ -541,36 +1122,98 @@ static struct dma_async_tx_descriptor *edma_prep_dma_memcpy( struct dma_chan *chan, dma_addr_t dest, dma_addr_t src, size_t len, unsigned long tx_flags) { - int ret; + int ret, nslots; struct edma_desc *edesc; struct device *dev = chan->device->dev; struct edma_chan *echan = to_edma_chan(chan); + unsigned int width, pset_len; if (unlikely(!echan || !len)) return NULL; - edesc = kzalloc(sizeof(*edesc) + sizeof(edesc->pset[0]), GFP_ATOMIC); + if (len < SZ_64K) { + /* + * Transfer size less than 64K can be handled with one paRAM + * slot and with one burst. + * ACNT = length + */ + width = len; + pset_len = len; + nslots = 1; + } else { + /* + * Transfer size bigger than 64K will be handled with maximum of + * two paRAM slots. + * slot1: (full_length / 32767) times 32767 bytes bursts. + * ACNT = 32767, length1: (full_length / 32767) * 32767 + * slot2: the remaining amount of data after slot1. 
+ * ACNT = full_length - length1, length2 = ACNT + * + * When the full_length is multibple of 32767 one slot can be + * used to complete the transfer. + */ + width = SZ_32K - 1; + pset_len = rounddown(len, width); + /* One slot is enough for lengths multiple of (SZ_32K -1) */ + if (unlikely(pset_len == len)) + nslots = 1; + else + nslots = 2; + } + + edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), + GFP_ATOMIC); if (!edesc) { dev_dbg(dev, "Failed to allocate a descriptor\n"); return NULL; } - edesc->pset_nr = 1; + edesc->pset_nr = nslots; + edesc->residue = edesc->residue_stat = len; + edesc->direction = DMA_MEM_TO_MEM; + edesc->echan = echan; ret = edma_config_pset(chan, &edesc->pset[0], src, dest, 1, - DMA_SLAVE_BUSWIDTH_4_BYTES, len, DMA_MEM_TO_MEM); - if (ret < 0) + width, pset_len, DMA_MEM_TO_MEM); + if (ret < 0) { + kfree(edesc); return NULL; + } edesc->absync = ret; - /* - * Enable intermediate transfer chaining to re-trigger channel - * on completion of every TR, and enable transfer-completion - * interrupt on completion of the whole transfer. - */ edesc->pset[0].param.opt |= ITCCHEN; - edesc->pset[0].param.opt |= TCINTEN; + if (nslots == 1) { + /* Enable transfer complete interrupt */ + edesc->pset[0].param.opt |= TCINTEN; + } else { + /* Enable transfer complete chaining for the first slot */ + edesc->pset[0].param.opt |= TCCHEN; + + if (echan->slot[1] < 0) { + echan->slot[1] = edma_alloc_slot(echan->ecc, + EDMA_SLOT_ANY); + if (echan->slot[1] < 0) { + kfree(edesc); + dev_err(dev, "%s: Failed to allocate slot\n", + __func__); + return NULL; + } + } + dest += pset_len; + src += pset_len; + pset_len = width = len % (SZ_32K - 1); + + ret = edma_config_pset(chan, &edesc->pset[1], src, dest, 1, + width, pset_len, DMA_MEM_TO_MEM); + if (ret < 0) { + kfree(edesc); + return NULL; + } + + edesc->pset[1].param.opt |= ITCCHEN; + edesc->pset[1].param.opt |= TCINTEN; + } return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); } @@ -629,8 +1272,8 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( if (nslots > MAX_NR_SG) return NULL; - edesc = kzalloc(sizeof(*edesc) + nslots * - sizeof(edesc->pset[0]), GFP_ATOMIC); + edesc = kzalloc(sizeof(*edesc) + nslots * sizeof(edesc->pset[0]), + GFP_ATOMIC); if (!edesc) { dev_err(dev, "%s: Failed to allocate a descriptor\n", __func__); return NULL; @@ -649,8 +1292,7 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( /* Allocate a PaRAM slot, if needed */ if (echan->slot[i] < 0) { echan->slot[i] = - edma_alloc_slot(EDMA_CTLR(echan->ch_num), - EDMA_SLOT_ANY); + edma_alloc_slot(echan->ecc, EDMA_SLOT_ANY); if (echan->slot[i] < 0) { kfree(edesc); dev_err(dev, "%s: Failed to allocate slot\n", @@ -711,128 +1353,281 @@ static struct dma_async_tx_descriptor *edma_prep_dma_cyclic( } /* Place the cyclic channel to highest priority queue */ - edma_assign_channel_eventq(echan->ch_num, EVENTQ_0); + if (!echan->tc) + edma_assign_channel_eventq(echan, EVENTQ_0); return vchan_tx_prep(&echan->vchan, &edesc->vdesc, tx_flags); } -static void edma_callback(unsigned ch_num, u16 ch_status, void *data) +static void edma_completion_handler(struct edma_chan *echan) { - struct edma_chan *echan = data; struct device *dev = echan->vchan.chan.device->dev; - struct edma_desc *edesc; - struct edmacc_param p; + struct edma_desc *edesc = echan->edesc; - edesc = echan->edesc; + if (!edesc) + return; - /* Pause the channel for non-cyclic */ - if (!edesc || (edesc && !edesc->cyclic)) - edma_pause(echan->ch_num); - - switch (ch_status) { - case 
EDMA_DMA_COMPLETE: - spin_lock(&echan->vchan.lock); - - if (edesc) { - if (edesc->cyclic) { - vchan_cyclic_callback(&edesc->vdesc); - } else if (edesc->processed == edesc->pset_nr) { - dev_dbg(dev, "Transfer complete, stopping channel %d\n", ch_num); - edesc->residue = 0; - edma_stop(echan->ch_num); - vchan_cookie_complete(&edesc->vdesc); - edma_execute(echan); - } else { - dev_dbg(dev, "Intermediate transfer complete on channel %d\n", ch_num); - - /* Update statistics for tx_status */ - edesc->residue -= edesc->sg_len; - edesc->residue_stat = edesc->residue; - edesc->processed_stat = edesc->processed; - - edma_execute(echan); - } + spin_lock(&echan->vchan.lock); + if (edesc->cyclic) { + vchan_cyclic_callback(&edesc->vdesc); + spin_unlock(&echan->vchan.lock); + return; + } else if (edesc->processed == edesc->pset_nr) { + edesc->residue = 0; + edma_stop(echan); + vchan_cookie_complete(&edesc->vdesc); + echan->edesc = NULL; + + dev_dbg(dev, "Transfer completed on channel %d\n", + echan->ch_num); + } else { + dev_dbg(dev, "Sub transfer completed on channel %d\n", + echan->ch_num); + + edma_pause(echan); + + /* Update statistics for tx_status */ + edesc->residue -= edesc->sg_len; + edesc->residue_stat = edesc->residue; + edesc->processed_stat = edesc->processed; + } + edma_execute(echan); + + spin_unlock(&echan->vchan.lock); +} + +/* eDMA interrupt handler */ +static irqreturn_t dma_irq_handler(int irq, void *data) +{ + struct edma_cc *ecc = data; + int ctlr; + u32 sh_ier; + u32 sh_ipr; + u32 bank; + + ctlr = ecc->id; + if (ctlr < 0) + return IRQ_NONE; + + dev_vdbg(ecc->dev, "dma_irq_handler\n"); + + sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 0); + if (!sh_ipr) { + sh_ipr = edma_shadow0_read_array(ecc, SH_IPR, 1); + if (!sh_ipr) + return IRQ_NONE; + sh_ier = edma_shadow0_read_array(ecc, SH_IER, 1); + bank = 1; + } else { + sh_ier = edma_shadow0_read_array(ecc, SH_IER, 0); + bank = 0; + } + + do { + u32 slot; + u32 channel; + + slot = __ffs(sh_ipr); + sh_ipr &= ~(BIT(slot)); + + if (sh_ier & BIT(slot)) { + channel = (bank << 5) | slot; + /* Clear the corresponding IPR bits */ + edma_shadow0_write_array(ecc, SH_ICR, bank, BIT(slot)); + edma_completion_handler(&ecc->slave_chans[channel]); } + } while (sh_ipr); - spin_unlock(&echan->vchan.lock); + edma_shadow0_write(ecc, SH_IEVAL, 1); + return IRQ_HANDLED; +} + +static void edma_error_handler(struct edma_chan *echan) +{ + struct edma_cc *ecc = echan->ecc; + struct device *dev = echan->vchan.chan.device->dev; + struct edmacc_param p; - break; - case EDMA_DMA_CC_ERROR: - spin_lock(&echan->vchan.lock); + if (!echan->edesc) + return; - edma_read_slot(EDMA_CHAN_SLOT(echan->slot[0]), &p); + spin_lock(&echan->vchan.lock); + edma_read_slot(ecc, echan->slot[0], &p); + /* + * Issue later based on missed flag which will be sure + * to happen as: + * (1) we finished transmitting an intermediate slot and + * edma_execute is coming up. + * (2) or we finished current transfer and issue will + * call edma_execute. + * + * Important note: issuing can be dangerous here and + * lead to some nasty recursion when we are in a NULL + * slot. So we avoid doing so and set the missed flag. + */ + if (p.a_b_cnt == 0 && p.ccnt == 0) { + dev_dbg(dev, "Error on null slot, setting miss\n"); + echan->missed = 1; + } else { /* - * Issue later based on missed flag which will be sure - * to happen as: - * (1) we finished transmitting an intermediate slot and - * edma_execute is coming up. - * (2) or we finished current transfer and issue will - * call edma_execute. 
- * - * Important note: issuing can be dangerous here and - * lead to some nasty recursion when we are in a NULL - * slot. So we avoid doing so and set the missed flag. + * The slot is already programmed but the event got + * missed, so its safe to issue it here. */ - if (p.a_b_cnt == 0 && p.ccnt == 0) { - dev_dbg(dev, "Error occurred, looks like slot is null, just setting miss\n"); - echan->missed = 1; - } else { - /* - * The slot is already programmed but the event got - * missed, so its safe to issue it here. - */ - dev_dbg(dev, "Error occurred but slot is non-null, TRIGGERING\n"); - edma_clean_channel(echan->ch_num); - edma_stop(echan->ch_num); - edma_start(echan->ch_num); - edma_trigger_channel(echan->ch_num); + dev_dbg(dev, "Missed event, TRIGGERING\n"); + edma_clean_channel(echan); + edma_stop(echan); + edma_start(echan); + edma_trigger_channel(echan); + } + spin_unlock(&echan->vchan.lock); +} + +static inline bool edma_error_pending(struct edma_cc *ecc) +{ + if (edma_read_array(ecc, EDMA_EMR, 0) || + edma_read_array(ecc, EDMA_EMR, 1) || + edma_read(ecc, EDMA_QEMR) || edma_read(ecc, EDMA_CCERR)) + return true; + + return false; +} + +/* eDMA error interrupt handler */ +static irqreturn_t dma_ccerr_handler(int irq, void *data) +{ + struct edma_cc *ecc = data; + int i, j; + int ctlr; + unsigned int cnt = 0; + unsigned int val; + + ctlr = ecc->id; + if (ctlr < 0) + return IRQ_NONE; + + dev_vdbg(ecc->dev, "dma_ccerr_handler\n"); + + if (!edma_error_pending(ecc)) + return IRQ_NONE; + + while (1) { + /* Event missed register(s) */ + for (j = 0; j < 2; j++) { + unsigned long emr; + + val = edma_read_array(ecc, EDMA_EMR, j); + if (!val) + continue; + + dev_dbg(ecc->dev, "EMR%d 0x%08x\n", j, val); + emr = val; + for (i = find_next_bit(&emr, 32, 0); i < 32; + i = find_next_bit(&emr, 32, i + 1)) { + int k = (j << 5) + i; + + /* Clear the corresponding EMR bits */ + edma_write_array(ecc, EDMA_EMCR, j, BIT(i)); + /* Clear any SER */ + edma_shadow0_write_array(ecc, SH_SECR, j, + BIT(i)); + edma_error_handler(&ecc->slave_chans[k]); + } } - spin_unlock(&echan->vchan.lock); + val = edma_read(ecc, EDMA_QEMR); + if (val) { + dev_dbg(ecc->dev, "QEMR 0x%02x\n", val); + /* Not reported, just clear the interrupt reason. */ + edma_write(ecc, EDMA_QEMCR, val); + edma_shadow0_write(ecc, SH_QSECR, val); + } - break; - default: - break; + val = edma_read(ecc, EDMA_CCERR); + if (val) { + dev_warn(ecc->dev, "CCERR 0x%08x\n", val); + /* Not reported, just clear the interrupt reason. */ + edma_write(ecc, EDMA_CCERRCLR, val); + } + + if (!edma_error_pending(ecc)) + break; + cnt++; + if (cnt > 10) + break; } + edma_write(ecc, EDMA_EEVAL, 1); + return IRQ_HANDLED; +} + +static void edma_tc_set_pm_state(struct edma_tc *tc, bool enable) +{ + struct platform_device *tc_pdev; + int ret; + + if (!IS_ENABLED(CONFIG_OF) || !tc) + return; + + tc_pdev = of_find_device_by_node(tc->node); + if (!tc_pdev) { + pr_err("%s: TPTC device is not found\n", __func__); + return; + } + if (!pm_runtime_enabled(&tc_pdev->dev)) + pm_runtime_enable(&tc_pdev->dev); + + if (enable) + ret = pm_runtime_get_sync(&tc_pdev->dev); + else + ret = pm_runtime_put_sync(&tc_pdev->dev); + + if (ret < 0) + pr_err("%s: pm_runtime_%s_sync() failed for %s\n", __func__, + enable ? 
"get" : "put", dev_name(&tc_pdev->dev)); } /* Alloc channel resources */ static int edma_alloc_chan_resources(struct dma_chan *chan) { struct edma_chan *echan = to_edma_chan(chan); - struct device *dev = chan->device->dev; + struct edma_cc *ecc = echan->ecc; + struct device *dev = ecc->dev; + enum dma_event_q eventq_no = EVENTQ_DEFAULT; int ret; - int a_ch_num; - LIST_HEAD(descs); - a_ch_num = edma_alloc_channel(echan->ch_num, edma_callback, - echan, EVENTQ_DEFAULT); - - if (a_ch_num < 0) { - ret = -ENODEV; - goto err_no_chan; + if (echan->tc) { + eventq_no = echan->tc->id; + } else if (ecc->tc_list) { + /* memcpy channel */ + echan->tc = &ecc->tc_list[ecc->info->default_queue]; + eventq_no = echan->tc->id; } - if (a_ch_num != echan->ch_num) { - dev_err(dev, "failed to allocate requested channel %u:%u\n", - EDMA_CTLR(echan->ch_num), + ret = edma_alloc_channel(echan, eventq_no); + if (ret) + return ret; + + echan->slot[0] = edma_alloc_slot(ecc, echan->ch_num); + if (echan->slot[0] < 0) { + dev_err(dev, "Entry slot allocation failed for channel %u\n", EDMA_CHAN_SLOT(echan->ch_num)); - ret = -ENODEV; - goto err_wrong_chan; + goto err_slot; } + /* Set up channel -> slot mapping for the entry slot */ + edma_set_chmap(echan, echan->slot[0]); echan->alloced = true; - echan->slot[0] = echan->ch_num; - dev_dbg(dev, "allocated channel %d for %u:%u\n", echan->ch_num, - EDMA_CTLR(echan->ch_num), EDMA_CHAN_SLOT(echan->ch_num)); + dev_dbg(dev, "Got eDMA channel %d for virt channel %d (%s trigger)\n", + EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id, + echan->hw_triggered ? "HW" : "SW"); + + edma_tc_set_pm_state(echan->tc, true); return 0; -err_wrong_chan: - edma_free_channel(a_ch_num); -err_no_chan: +err_slot: + edma_free_channel(echan); return ret; } @@ -840,29 +1635,37 @@ err_no_chan: static void edma_free_chan_resources(struct dma_chan *chan) { struct edma_chan *echan = to_edma_chan(chan); - struct device *dev = chan->device->dev; + struct device *dev = echan->ecc->dev; int i; /* Terminate transfers */ - edma_stop(echan->ch_num); + edma_stop(echan); vchan_free_chan_resources(&echan->vchan); /* Free EDMA PaRAM slots */ - for (i = 1; i < EDMA_MAX_SLOTS; i++) { + for (i = 0; i < EDMA_MAX_SLOTS; i++) { if (echan->slot[i] >= 0) { - edma_free_slot(echan->slot[i]); + edma_free_slot(echan->ecc, echan->slot[i]); echan->slot[i] = -1; } } + /* Set entry slot to the dummy slot */ + edma_set_chmap(echan, echan->ecc->dummy_slot); + /* Free EDMA channel */ if (echan->alloced) { - edma_free_channel(echan->ch_num); + edma_free_channel(echan); echan->alloced = false; } - dev_dbg(dev, "freeing channel for %u\n", echan->ch_num); + edma_tc_set_pm_state(echan->tc, false); + echan->tc = NULL; + echan->hw_triggered = false; + + dev_dbg(dev, "Free eDMA channel %d for virt channel %d\n", + EDMA_CHAN_SLOT(echan->ch_num), chan->chan_id); } /* Send pending descriptor to hardware */ @@ -888,7 +1691,7 @@ static u32 edma_residue(struct edma_desc *edesc) * We always read the dst/src position from the first RamPar * pset. That's the one which is active now. */ - pos = edma_get_position(edesc->echan->slot[0], dst); + pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst); /* * Cyclic is simple. Just subtract pset[0].addr from pos. 
@@ -949,19 +1752,99 @@ static enum dma_status edma_tx_status(struct dma_chan *chan, return ret; } -static void __init edma_chan_init(struct edma_cc *ecc, - struct dma_device *dma, - struct edma_chan *echans) +static bool edma_is_memcpy_channel(int ch_num, s32 *memcpy_channels) { + if (!memcpy_channels) + return false; + while (*memcpy_channels != -1) { + if (*memcpy_channels == ch_num) + return true; + memcpy_channels++; + } + return false; +} + +#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ + BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ + BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) + +static void edma_dma_init(struct edma_cc *ecc, bool legacy_mode) +{ + struct dma_device *s_ddev = &ecc->dma_slave; + struct dma_device *m_ddev = NULL; + s32 *memcpy_channels = ecc->info->memcpy_channels; int i, j; - for (i = 0; i < EDMA_CHANS; i++) { - struct edma_chan *echan = &echans[i]; - echan->ch_num = EDMA_CTLR_CHAN(ecc->ctlr, i); + dma_cap_zero(s_ddev->cap_mask); + dma_cap_set(DMA_SLAVE, s_ddev->cap_mask); + dma_cap_set(DMA_CYCLIC, s_ddev->cap_mask); + if (ecc->legacy_mode && !memcpy_channels) { + dev_warn(ecc->dev, + "Legacy memcpy is enabled, things might not work\n"); + + dma_cap_set(DMA_MEMCPY, s_ddev->cap_mask); + s_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; + s_ddev->directions = BIT(DMA_MEM_TO_MEM); + } + + s_ddev->device_prep_slave_sg = edma_prep_slave_sg; + s_ddev->device_prep_dma_cyclic = edma_prep_dma_cyclic; + s_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; + s_ddev->device_free_chan_resources = edma_free_chan_resources; + s_ddev->device_issue_pending = edma_issue_pending; + s_ddev->device_tx_status = edma_tx_status; + s_ddev->device_config = edma_slave_config; + s_ddev->device_pause = edma_dma_pause; + s_ddev->device_resume = edma_dma_resume; + s_ddev->device_terminate_all = edma_terminate_all; + + s_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; + s_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; + s_ddev->directions |= (BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV)); + s_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + s_ddev->dev = ecc->dev; + INIT_LIST_HEAD(&s_ddev->channels); + + if (memcpy_channels) { + m_ddev = devm_kzalloc(ecc->dev, sizeof(*m_ddev), GFP_KERNEL); + ecc->dma_memcpy = m_ddev; + + dma_cap_zero(m_ddev->cap_mask); + dma_cap_set(DMA_MEMCPY, m_ddev->cap_mask); + + m_ddev->device_prep_dma_memcpy = edma_prep_dma_memcpy; + m_ddev->device_alloc_chan_resources = edma_alloc_chan_resources; + m_ddev->device_free_chan_resources = edma_free_chan_resources; + m_ddev->device_issue_pending = edma_issue_pending; + m_ddev->device_tx_status = edma_tx_status; + m_ddev->device_config = edma_slave_config; + m_ddev->device_pause = edma_dma_pause; + m_ddev->device_resume = edma_dma_resume; + m_ddev->device_terminate_all = edma_terminate_all; + + m_ddev->src_addr_widths = EDMA_DMA_BUSWIDTHS; + m_ddev->dst_addr_widths = EDMA_DMA_BUSWIDTHS; + m_ddev->directions = BIT(DMA_MEM_TO_MEM); + m_ddev->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + + m_ddev->dev = ecc->dev; + INIT_LIST_HEAD(&m_ddev->channels); + } else if (!ecc->legacy_mode) { + dev_info(ecc->dev, "memcpy is disabled\n"); + } + + for (i = 0; i < ecc->num_channels; i++) { + struct edma_chan *echan = &ecc->slave_chans[i]; + echan->ch_num = EDMA_CTLR_CHAN(ecc->id, i); echan->ecc = ecc; echan->vchan.desc_free = edma_desc_free; - vchan_init(&echan->vchan, dma); + if (m_ddev && edma_is_memcpy_channel(i, memcpy_channels)) + vchan_init(&echan->vchan, m_ddev); + else + 
vchan_init(&echan->vchan, s_ddev); INIT_LIST_HEAD(&echan->node); for (j = 0; j < EDMA_MAX_SLOTS; j++) @@ -969,85 +1852,493 @@ static void __init edma_chan_init(struct edma_cc *ecc, } } -#define EDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ - BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ - BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \ - BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) - -static void edma_dma_init(struct edma_cc *ecc, struct dma_device *dma, - struct device *dev) +static int edma_setup_from_hw(struct device *dev, struct edma_soc_info *pdata, + struct edma_cc *ecc) { - dma->device_prep_slave_sg = edma_prep_slave_sg; - dma->device_prep_dma_cyclic = edma_prep_dma_cyclic; - dma->device_prep_dma_memcpy = edma_prep_dma_memcpy; - dma->device_alloc_chan_resources = edma_alloc_chan_resources; - dma->device_free_chan_resources = edma_free_chan_resources; - dma->device_issue_pending = edma_issue_pending; - dma->device_tx_status = edma_tx_status; - dma->device_config = edma_slave_config; - dma->device_pause = edma_dma_pause; - dma->device_resume = edma_dma_resume; - dma->device_terminate_all = edma_terminate_all; + int i; + u32 value, cccfg; + s8 (*queue_priority_map)[2]; + + /* Decode the eDMA3 configuration from CCCFG register */ + cccfg = edma_read(ecc, EDMA_CCCFG); - dma->src_addr_widths = EDMA_DMA_BUSWIDTHS; - dma->dst_addr_widths = EDMA_DMA_BUSWIDTHS; - dma->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); - dma->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST; + value = GET_NUM_REGN(cccfg); + ecc->num_region = BIT(value); - dma->dev = dev; + value = GET_NUM_DMACH(cccfg); + ecc->num_channels = BIT(value + 1); + + value = GET_NUM_QDMACH(cccfg); + ecc->num_qchannels = value * 2; + + value = GET_NUM_PAENTRY(cccfg); + ecc->num_slots = BIT(value + 4); + + value = GET_NUM_EVQUE(cccfg); + ecc->num_tc = value + 1; + + ecc->chmap_exist = (cccfg & CHMAP_EXIST) ? true : false; + + dev_dbg(dev, "eDMA3 CC HW configuration (cccfg: 0x%08x):\n", cccfg); + dev_dbg(dev, "num_region: %u\n", ecc->num_region); + dev_dbg(dev, "num_channels: %u\n", ecc->num_channels); + dev_dbg(dev, "num_qchannels: %u\n", ecc->num_qchannels); + dev_dbg(dev, "num_slots: %u\n", ecc->num_slots); + dev_dbg(dev, "num_tc: %u\n", ecc->num_tc); + dev_dbg(dev, "chmap_exist: %s\n", ecc->chmap_exist ? "yes" : "no"); + + /* Nothing need to be done if queue priority is provided */ + if (pdata->queue_priority_mapping) + return 0; /* - * code using dma memcpy must make sure alignment of - * length is at dma->copy_align boundary. + * Configure TC/queue priority as follows: + * Q0 - priority 0 + * Q1 - priority 1 + * Q2 - priority 2 + * ... + * The meaning of priority numbers: 0 highest priority, 7 lowest + * priority. So Q0 is the highest priority queue and the last queue has + * the lowest priority. 
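 * [Illustration, not part of the patch: with num_tc = 4 the loop below
 *  fills queue_priority_map with {0, 0}, {1, 1}, {2, 2}, {3, 3},
 *  terminated by {-1, -1}, and default_queue is set to 3, i.e. the
 *  lowest-priority queue.]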
*/ - dma->copy_align = DMAENGINE_ALIGN_4_BYTES; + queue_priority_map = devm_kcalloc(dev, ecc->num_tc + 1, sizeof(s8), + GFP_KERNEL); + if (!queue_priority_map) + return -ENOMEM; + + for (i = 0; i < ecc->num_tc; i++) { + queue_priority_map[i][0] = i; + queue_priority_map[i][1] = i; + } + queue_priority_map[i][0] = -1; + queue_priority_map[i][1] = -1; + + pdata->queue_priority_mapping = queue_priority_map; + /* Default queue has the lowest priority */ + pdata->default_queue = i - 1; + + return 0; +} + +#if IS_ENABLED(CONFIG_OF) +static int edma_xbar_event_map(struct device *dev, struct edma_soc_info *pdata, + size_t sz) +{ + const char pname[] = "ti,edma-xbar-event-map"; + struct resource res; + void __iomem *xbar; + s16 (*xbar_chans)[2]; + size_t nelm = sz / sizeof(s16); + u32 shift, offset, mux; + int ret, i; + + xbar_chans = devm_kcalloc(dev, nelm + 2, sizeof(s16), GFP_KERNEL); + if (!xbar_chans) + return -ENOMEM; + + ret = of_address_to_resource(dev->of_node, 1, &res); + if (ret) + return -ENOMEM; + + xbar = devm_ioremap(dev, res.start, resource_size(&res)); + if (!xbar) + return -ENOMEM; + + ret = of_property_read_u16_array(dev->of_node, pname, (u16 *)xbar_chans, + nelm); + if (ret) + return -EIO; + + /* Invalidate last entry for the other user of this mess */ + nelm >>= 1; + xbar_chans[nelm][0] = -1; + xbar_chans[nelm][1] = -1; + + for (i = 0; i < nelm; i++) { + shift = (xbar_chans[i][1] & 0x03) << 3; + offset = xbar_chans[i][1] & 0xfffffffc; + mux = readl(xbar + offset); + mux &= ~(0xff << shift); + mux |= xbar_chans[i][0] << shift; + writel(mux, (xbar + offset)); + } + + pdata->xbar_chans = (const s16 (*)[2]) xbar_chans; + return 0; +} + +static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, + bool legacy_mode) +{ + struct edma_soc_info *info; + struct property *prop; + size_t sz; + int ret; + + info = devm_kzalloc(dev, sizeof(struct edma_soc_info), GFP_KERNEL); + if (!info) + return ERR_PTR(-ENOMEM); + + if (legacy_mode) { + prop = of_find_property(dev->of_node, "ti,edma-xbar-event-map", + &sz); + if (prop) { + ret = edma_xbar_event_map(dev, info, sz); + if (ret) + return ERR_PTR(ret); + } + return info; + } + + /* Get the list of channels allocated to be used for memcpy */ + prop = of_find_property(dev->of_node, "ti,edma-memcpy-channels", &sz); + if (prop) { + const char pname[] = "ti,edma-memcpy-channels"; + size_t nelm = sz / sizeof(s32); + s32 *memcpy_ch; + + memcpy_ch = devm_kcalloc(dev, nelm + 1, sizeof(s32), + GFP_KERNEL); + if (!memcpy_ch) + return ERR_PTR(-ENOMEM); + + ret = of_property_read_u32_array(dev->of_node, pname, + (u32 *)memcpy_ch, nelm); + if (ret) + return ERR_PTR(ret); + + memcpy_ch[nelm] = -1; + info->memcpy_channels = memcpy_ch; + } + + prop = of_find_property(dev->of_node, "ti,edma-reserved-slot-ranges", + &sz); + if (prop) { + const char pname[] = "ti,edma-reserved-slot-ranges"; + u32 (*tmp)[2]; + s16 (*rsv_slots)[2]; + size_t nelm = sz / sizeof(*tmp); + struct edma_rsv_info *rsv_info; + int i; + + if (!nelm) + return info; + + tmp = kcalloc(nelm, sizeof(*tmp), GFP_KERNEL); + if (!tmp) + return ERR_PTR(-ENOMEM); + + rsv_info = devm_kzalloc(dev, sizeof(*rsv_info), GFP_KERNEL); + if (!rsv_info) { + kfree(tmp); + return ERR_PTR(-ENOMEM); + } + + rsv_slots = devm_kcalloc(dev, nelm + 1, sizeof(*rsv_slots), + GFP_KERNEL); + if (!rsv_slots) { + kfree(tmp); + return ERR_PTR(-ENOMEM); + } - INIT_LIST_HEAD(&dma->channels); + ret = of_property_read_u32_array(dev->of_node, pname, + (u32 *)tmp, nelm * 2); + if (ret) { + kfree(tmp); + return 
ERR_PTR(ret); + } + + for (i = 0; i < nelm; i++) { + rsv_slots[i][0] = tmp[i][0]; + rsv_slots[i][1] = tmp[i][1]; + } + rsv_slots[nelm][0] = -1; + rsv_slots[nelm][1] = -1; + + info->rsv = rsv_info; + info->rsv->rsv_slots = (const s16 (*)[2])rsv_slots; + + kfree(tmp); + } + + return info; } +static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct edma_cc *ecc = ofdma->of_dma_data; + struct dma_chan *chan = NULL; + struct edma_chan *echan; + int i; + + if (!ecc || dma_spec->args_count < 1) + return NULL; + + for (i = 0; i < ecc->num_channels; i++) { + echan = &ecc->slave_chans[i]; + if (echan->ch_num == dma_spec->args[0]) { + chan = &echan->vchan.chan; + break; + } + } + + if (!chan) + return NULL; + + if (echan->ecc->legacy_mode && dma_spec->args_count == 1) + goto out; + + if (!echan->ecc->legacy_mode && dma_spec->args_count == 2 && + dma_spec->args[1] < echan->ecc->num_tc) { + echan->tc = &echan->ecc->tc_list[dma_spec->args[1]]; + goto out; + } + + return NULL; +out: + /* The channel is going to be used as HW synchronized */ + echan->hw_triggered = true; + return dma_get_slave_channel(chan); +} +#else +static struct edma_soc_info *edma_setup_info_from_dt(struct device *dev, + bool legacy_mode) +{ + return ERR_PTR(-EINVAL); +} + +static struct dma_chan *of_edma_xlate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + return NULL; +} +#endif + static int edma_probe(struct platform_device *pdev) { - struct edma_cc *ecc; + struct edma_soc_info *info = pdev->dev.platform_data; + s8 (*queue_priority_mapping)[2]; + int i, off, ln; + const s16 (*rsv_slots)[2]; + const s16 (*xbar_chans)[2]; + int irq; + char *irq_name; + struct resource *mem; + struct device_node *node = pdev->dev.of_node; + struct device *dev = &pdev->dev; + struct edma_cc *ecc; + bool legacy_mode = true; int ret; - ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); + if (node) { + const struct of_device_id *match; + + match = of_match_node(edma_of_ids, node); + if (match && (u32)match->data == EDMA_BINDING_TPCC) + legacy_mode = false; + + info = edma_setup_info_from_dt(dev, legacy_mode); + if (IS_ERR(info)) { + dev_err(dev, "failed to get DT data\n"); + return PTR_ERR(info); + } + } + + if (!info) + return -ENODEV; + + pm_runtime_enable(dev); + ret = pm_runtime_get_sync(dev); + if (ret < 0) { + dev_err(dev, "pm_runtime_get_sync() failed\n"); + return ret; + } + + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); if (ret) return ret; - ecc = devm_kzalloc(&pdev->dev, sizeof(*ecc), GFP_KERNEL); + ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); if (!ecc) { - dev_err(&pdev->dev, "Can't allocate controller\n"); + dev_err(dev, "Can't allocate controller\n"); return -ENOMEM; } - ecc->ctlr = pdev->id; - ecc->dummy_slot = edma_alloc_slot(ecc->ctlr, EDMA_SLOT_ANY); + ecc->dev = dev; + ecc->id = pdev->id; + ecc->legacy_mode = legacy_mode; + /* When booting with DT the pdev->id is -1 */ + if (ecc->id < 0) + ecc->id = 0; + + mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "edma3_cc"); + if (!mem) { + dev_dbg(dev, "mem resource not found, using index 0\n"); + mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); + if (!mem) { + dev_err(dev, "no mem resource?\n"); + return -ENODEV; + } + } + ecc->base = devm_ioremap_resource(dev, mem); + if (IS_ERR(ecc->base)) + return PTR_ERR(ecc->base); + + platform_set_drvdata(pdev, ecc); + + /* Get eDMA3 configuration from IP */ + ret = edma_setup_from_hw(dev, info, ecc); + if (ret) + return ret; + + /* Allocate 
memory based on the information we got from the IP */ + ecc->slave_chans = devm_kcalloc(dev, ecc->num_channels, + sizeof(*ecc->slave_chans), GFP_KERNEL); + if (!ecc->slave_chans) + return -ENOMEM; + + ecc->slot_inuse = devm_kcalloc(dev, BITS_TO_LONGS(ecc->num_slots), + sizeof(unsigned long), GFP_KERNEL); + if (!ecc->slot_inuse) + return -ENOMEM; + + ecc->default_queue = info->default_queue; + + for (i = 0; i < ecc->num_slots; i++) + edma_write_slot(ecc, i, &dummy_paramset); + + if (info->rsv) { + /* Set the reserved slots in inuse list */ + rsv_slots = info->rsv->rsv_slots; + if (rsv_slots) { + for (i = 0; rsv_slots[i][0] != -1; i++) { + off = rsv_slots[i][0]; + ln = rsv_slots[i][1]; + set_bits(off, ln, ecc->slot_inuse); + } + } + } + + /* Clear the xbar mapped channels in unused list */ + xbar_chans = info->xbar_chans; + if (xbar_chans) { + for (i = 0; xbar_chans[i][1] != -1; i++) { + off = xbar_chans[i][1]; + } + } + + irq = platform_get_irq_byname(pdev, "edma3_ccint"); + if (irq < 0 && node) + irq = irq_of_parse_and_map(node, 0); + + if (irq >= 0) { + irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccint", + dev_name(dev)); + ret = devm_request_irq(dev, irq, dma_irq_handler, 0, irq_name, + ecc); + if (ret) { + dev_err(dev, "CCINT (%d) failed --> %d\n", irq, ret); + return ret; + } + } + + irq = platform_get_irq_byname(pdev, "edma3_ccerrint"); + if (irq < 0 && node) + irq = irq_of_parse_and_map(node, 2); + + if (irq >= 0) { + irq_name = devm_kasprintf(dev, GFP_KERNEL, "%s_ccerrint", + dev_name(dev)); + ret = devm_request_irq(dev, irq, dma_ccerr_handler, 0, irq_name, + ecc); + if (ret) { + dev_err(dev, "CCERRINT (%d) failed --> %d\n", irq, ret); + return ret; + } + } + + ecc->dummy_slot = edma_alloc_slot(ecc, EDMA_SLOT_ANY); if (ecc->dummy_slot < 0) { - dev_err(&pdev->dev, "Can't allocate PaRAM dummy slot\n"); + dev_err(dev, "Can't allocate PaRAM dummy slot\n"); return ecc->dummy_slot; } - dma_cap_zero(ecc->dma_slave.cap_mask); - dma_cap_set(DMA_SLAVE, ecc->dma_slave.cap_mask); - dma_cap_set(DMA_CYCLIC, ecc->dma_slave.cap_mask); - dma_cap_set(DMA_MEMCPY, ecc->dma_slave.cap_mask); + queue_priority_mapping = info->queue_priority_mapping; + + if (!ecc->legacy_mode) { + int lowest_priority = 0; + struct of_phandle_args tc_args; + + ecc->tc_list = devm_kcalloc(dev, ecc->num_tc, + sizeof(*ecc->tc_list), GFP_KERNEL); + if (!ecc->tc_list) + return -ENOMEM; + + for (i = 0;; i++) { + ret = of_parse_phandle_with_fixed_args(node, "ti,tptcs", + 1, i, &tc_args); + if (ret || i == ecc->num_tc) + break; + + ecc->tc_list[i].node = tc_args.np; + ecc->tc_list[i].id = i; + queue_priority_mapping[i][1] = tc_args.args[0]; + if (queue_priority_mapping[i][1] > lowest_priority) { + lowest_priority = queue_priority_mapping[i][1]; + info->default_queue = i; + } + } + } + + /* Event queue priority mapping */ + for (i = 0; queue_priority_mapping[i][0] != -1; i++) + edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], + queue_priority_mapping[i][1]); - edma_dma_init(ecc, &ecc->dma_slave, &pdev->dev); + for (i = 0; i < ecc->num_region; i++) { + edma_write_array2(ecc, EDMA_DRAE, i, 0, 0x0); + edma_write_array2(ecc, EDMA_DRAE, i, 1, 0x0); + edma_write_array(ecc, EDMA_QRAE, i, 0x0); + } + ecc->info = info; - edma_chan_init(ecc, &ecc->dma_slave, ecc->slave_chans); + /* Init the dma device and channels */ + edma_dma_init(ecc, legacy_mode); + + for (i = 0; i < ecc->num_channels; i++) { + /* Assign all channels to the default queue */ + edma_assign_channel_eventq(&ecc->slave_chans[i], + info->default_queue); + /* 
Set entry slot to the dummy slot */ + edma_set_chmap(&ecc->slave_chans[i], ecc->dummy_slot); + } ret = dma_async_device_register(&ecc->dma_slave); - if (ret) + if (ret) { + dev_err(dev, "slave ddev registration failed (%d)\n", ret); goto err_reg1; + } - platform_set_drvdata(pdev, ecc); + if (ecc->dma_memcpy) { + ret = dma_async_device_register(ecc->dma_memcpy); + if (ret) { + dev_err(dev, "memcpy ddev registration failed (%d)\n", + ret); + dma_async_device_unregister(&ecc->dma_slave); + goto err_reg1; + } + } - dev_info(&pdev->dev, "TI EDMA DMA engine driver\n"); + if (node) + of_dma_controller_register(node, of_edma_xlate, ecc); + + dev_info(dev, "TI EDMA DMA engine driver\n"); return 0; err_reg1: - edma_free_slot(ecc->dummy_slot); + edma_free_slot(ecc, ecc->dummy_slot); return ret; } @@ -1056,33 +2347,112 @@ static int edma_remove(struct platform_device *pdev) struct device *dev = &pdev->dev; struct edma_cc *ecc = dev_get_drvdata(dev); + if (dev->of_node) + of_dma_controller_free(dev->of_node); dma_async_device_unregister(&ecc->dma_slave); - edma_free_slot(ecc->dummy_slot); + if (ecc->dma_memcpy) + dma_async_device_unregister(ecc->dma_memcpy); + edma_free_slot(ecc, ecc->dummy_slot); + + return 0; +} + +#ifdef CONFIG_PM_SLEEP +static int edma_pm_suspend(struct device *dev) +{ + struct edma_cc *ecc = dev_get_drvdata(dev); + struct edma_chan *echan = ecc->slave_chans; + int i; + + for (i = 0; i < ecc->num_channels; i++) { + if (echan[i].alloced) { + edma_setup_interrupt(&echan[i], false); + edma_tc_set_pm_state(echan[i].tc, false); + } + } return 0; } +static int edma_pm_resume(struct device *dev) +{ + struct edma_cc *ecc = dev_get_drvdata(dev); + struct edma_chan *echan = ecc->slave_chans; + int i; + s8 (*queue_priority_mapping)[2]; + + queue_priority_mapping = ecc->info->queue_priority_mapping; + + /* Event queue priority mapping */ + for (i = 0; queue_priority_mapping[i][0] != -1; i++) + edma_assign_priority_to_queue(ecc, queue_priority_mapping[i][0], + queue_priority_mapping[i][1]); + + for (i = 0; i < ecc->num_channels; i++) { + if (echan[i].alloced) { + /* ensure access through shadow region 0 */ + edma_or_array2(ecc, EDMA_DRAE, 0, i >> 5, + BIT(i & 0x1f)); + + edma_setup_interrupt(&echan[i], true); + + /* Set up channel -> slot mapping for the entry slot */ + edma_set_chmap(&echan[i], echan[i].slot[0]); + + edma_tc_set_pm_state(echan[i].tc, true); + } + } + + return 0; +} +#endif + +static const struct dev_pm_ops edma_pm_ops = { + SET_LATE_SYSTEM_SLEEP_PM_OPS(edma_pm_suspend, edma_pm_resume) +}; + static struct platform_driver edma_driver = { .probe = edma_probe, .remove = edma_remove, .driver = { - .name = "edma-dma-engine", + .name = "edma", + .pm = &edma_pm_ops, + .of_match_table = edma_of_ids, + }, +}; + +static struct platform_driver edma_tptc_driver = { + .driver = { + .name = "edma3-tptc", + .of_match_table = edma_tptc_of_ids, }, }; bool edma_filter_fn(struct dma_chan *chan, void *param) { + bool match = false; + if (chan->device->dev->driver == &edma_driver.driver) { struct edma_chan *echan = to_edma_chan(chan); unsigned ch_req = *(unsigned *)param; - return ch_req == echan->ch_num; + if (ch_req == echan->ch_num) { + /* The channel is going to be used as HW synchronized */ + echan->hw_triggered = true; + match = true; + } } - return false; + return match; } EXPORT_SYMBOL(edma_filter_fn); static int edma_init(void) { + int ret; + + ret = platform_driver_register(&edma_tptc_driver); + if (ret) + return ret; + return platform_driver_register(&edma_driver); } 
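For context on edma_filter_fn() above: a legacy (non-DT) client would normally hand it to dma_request_channel() together with the request line it wants. The snippet below is a hedged usage sketch, not taken from this patch; the function name and the request-line number are invented for illustration.

	#include <linux/dmaengine.h>
	#include <linux/platform_data/edma.h>

	/* Sketch: request the eDMA channel wired to request line 12 of
	 * controller 0 the legacy way; edma_filter_fn() matches on ch_num and
	 * marks the channel as hardware triggered, as in the hunk above.
	 */
	static struct dma_chan *edma_request_chan_sketch(void)
	{
		dma_cap_mask_t mask;
		unsigned int ch_num = EDMA_CTLR_CHAN(0, 12);

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		return dma_request_channel(mask, edma_filter_fn, &ch_num);
	}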
subsys_initcall(edma_init); @@ -1090,6 +2460,7 @@ subsys_initcall(edma_init); static void __exit edma_exit(void) { platform_driver_unregister(&edma_driver); + platform_driver_unregister(&edma_tptc_driver); } module_exit(edma_exit); diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c index 300f821f1..2209f75fd 100644 --- a/drivers/dma/fsldma.c +++ b/drivers/dma/fsldma.c @@ -1512,6 +1512,7 @@ static const struct of_device_id fsldma_of_ids[] = { { .compatible = "fsl,elo-dma", }, {} }; +MODULE_DEVICE_TABLE(of, fsldma_of_ids); static struct platform_driver fsldma_of_driver = { .driver = { diff --git a/drivers/dma/hsu/Kconfig b/drivers/dma/hsu/Kconfig index 2810dca70..c70841731 100644 --- a/drivers/dma/hsu/Kconfig +++ b/drivers/dma/hsu/Kconfig @@ -5,10 +5,5 @@ config HSU_DMA select DMA_VIRTUAL_CHANNELS config HSU_DMA_PCI - tristate "High Speed UART DMA PCI driver" - depends on PCI - select HSU_DMA - help - Support the High Speed UART DMA on the platfroms that - enumerate it as a PCI device. For example, Intel Medfield - has integrated this HSU DMA controller. + tristate + depends on HSU_DMA && PCI diff --git a/drivers/dma/hsu/hsu.c b/drivers/dma/hsu/hsu.c index 7669c7dd1..823ad728a 100644 --- a/drivers/dma/hsu/hsu.c +++ b/drivers/dma/hsu/hsu.c @@ -146,7 +146,7 @@ irqreturn_t hsu_dma_irq(struct hsu_dma_chip *chip, unsigned short nr) u32 sr; /* Sanity check */ - if (nr >= chip->pdata->nr_channels) + if (nr >= chip->hsu->nr_channels) return IRQ_NONE; hsuc = &chip->hsu->chan[nr]; @@ -375,7 +375,6 @@ static void hsu_dma_free_chan_resources(struct dma_chan *chan) int hsu_dma_probe(struct hsu_dma_chip *chip) { struct hsu_dma *hsu; - struct hsu_dma_platform_data *pdata = chip->pdata; void __iomem *addr = chip->regs + chip->offset; unsigned short i; int ret; @@ -386,25 +385,16 @@ int hsu_dma_probe(struct hsu_dma_chip *chip) chip->hsu = hsu; - if (!pdata) { - pdata = devm_kzalloc(chip->dev, sizeof(*pdata), GFP_KERNEL); - if (!pdata) - return -ENOMEM; + /* Calculate nr_channels from the IO space length */ + hsu->nr_channels = (chip->length - chip->offset) / HSU_DMA_CHAN_LENGTH; - chip->pdata = pdata; - - /* Guess nr_channels from the IO space length */ - pdata->nr_channels = (chip->length - chip->offset) / - HSU_DMA_CHAN_LENGTH; - } - - hsu->chan = devm_kcalloc(chip->dev, pdata->nr_channels, + hsu->chan = devm_kcalloc(chip->dev, hsu->nr_channels, sizeof(*hsu->chan), GFP_KERNEL); if (!hsu->chan) return -ENOMEM; INIT_LIST_HEAD(&hsu->dma.channels); - for (i = 0; i < pdata->nr_channels; i++) { + for (i = 0; i < hsu->nr_channels; i++) { struct hsu_dma_chan *hsuc = &hsu->chan[i]; hsuc->vchan.desc_free = hsu_dma_desc_free; @@ -440,7 +430,7 @@ int hsu_dma_probe(struct hsu_dma_chip *chip) if (ret) return ret; - dev_info(chip->dev, "Found HSU DMA, %d channels\n", pdata->nr_channels); + dev_info(chip->dev, "Found HSU DMA, %d channels\n", hsu->nr_channels); return 0; } EXPORT_SYMBOL_GPL(hsu_dma_probe); @@ -452,7 +442,7 @@ int hsu_dma_remove(struct hsu_dma_chip *chip) dma_async_device_unregister(&hsu->dma); - for (i = 0; i < chip->pdata->nr_channels; i++) { + for (i = 0; i < hsu->nr_channels; i++) { struct hsu_dma_chan *hsuc = &hsu->chan[i]; tasklet_kill(&hsuc->vchan.task); diff --git a/drivers/dma/hsu/hsu.h b/drivers/dma/hsu/hsu.h index eeb9fff66..f06579c6d 100644 --- a/drivers/dma/hsu/hsu.h +++ b/drivers/dma/hsu/hsu.h @@ -107,6 +107,7 @@ struct hsu_dma { /* channels */ struct hsu_dma_chan *chan; + unsigned short nr_channels; }; static inline struct hsu_dma *to_hsu_dma(struct dma_device *ddev) diff --git 
a/drivers/dma/hsu/pci.c b/drivers/dma/hsu/pci.c index 77879e6dd..e2db76bd5 100644 --- a/drivers/dma/hsu/pci.c +++ b/drivers/dma/hsu/pci.c @@ -31,7 +31,7 @@ static irqreturn_t hsu_pci_irq(int irq, void *dev) irqreturn_t ret = IRQ_NONE; dmaisr = readl(chip->regs + HSU_PCI_DMAISR); - for (i = 0; i < chip->pdata->nr_channels; i++) { + for (i = 0; i < chip->hsu->nr_channels; i++) { if (dmaisr & 0x1) ret |= hsu_dma_irq(chip, i); dmaisr >>= 1; diff --git a/drivers/dma/idma64.c b/drivers/dma/idma64.c index 48d6d9e94..7d56b47e4 100644 --- a/drivers/dma/idma64.c +++ b/drivers/dma/idma64.c @@ -65,9 +65,6 @@ static void idma64_chan_init(struct idma64 *idma64, struct idma64_chan *idma64c) u32 cfghi = IDMA64C_CFGH_SRC_PER(1) | IDMA64C_CFGH_DST_PER(0); u32 cfglo = 0; - /* Enforce FIFO drain when channel is suspended */ - cfglo |= IDMA64C_CFGL_CH_DRAIN; - /* Set default burst alignment */ cfglo |= IDMA64C_CFGL_DST_BURST_ALIGN | IDMA64C_CFGL_SRC_BURST_ALIGN; @@ -257,15 +254,15 @@ static u64 idma64_hw_desc_fill(struct idma64_hw_desc *hw, dar = config->dst_addr; ctllo |= IDMA64C_CTLL_DST_FIX | IDMA64C_CTLL_SRC_INC | IDMA64C_CTLL_FC_M2P; - src_width = min_t(u32, 2, __fls(sar | hw->len)); - dst_width = __fls(config->dst_addr_width); + src_width = __ffs(sar | hw->len | 4); + dst_width = __ffs(config->dst_addr_width); } else { /* DMA_DEV_TO_MEM */ sar = config->src_addr; dar = hw->phys; ctllo |= IDMA64C_CTLL_DST_INC | IDMA64C_CTLL_SRC_FIX | IDMA64C_CTLL_FC_P2M; - src_width = __fls(config->src_addr_width); - dst_width = min_t(u32, 2, __fls(dar | hw->len)); + src_width = __ffs(config->src_addr_width); + dst_width = __ffs(dar | hw->len | 4); } lli->sar = sar; @@ -428,12 +425,17 @@ static int idma64_slave_config(struct dma_chan *chan, return 0; } -static void idma64_chan_deactivate(struct idma64_chan *idma64c) +static void idma64_chan_deactivate(struct idma64_chan *idma64c, bool drain) { unsigned short count = 100; u32 cfglo; cfglo = channel_readl(idma64c, CFG_LO); + if (drain) + cfglo |= IDMA64C_CFGL_CH_DRAIN; + else + cfglo &= ~IDMA64C_CFGL_CH_DRAIN; + channel_writel(idma64c, CFG_LO, cfglo | IDMA64C_CFGL_CH_SUSP); do { udelay(1); @@ -456,7 +458,7 @@ static int idma64_pause(struct dma_chan *chan) spin_lock_irqsave(&idma64c->vchan.lock, flags); if (idma64c->desc && idma64c->desc->status == DMA_IN_PROGRESS) { - idma64_chan_deactivate(idma64c); + idma64_chan_deactivate(idma64c, false); idma64c->desc->status = DMA_PAUSED; } spin_unlock_irqrestore(&idma64c->vchan.lock, flags); @@ -486,7 +488,7 @@ static int idma64_terminate_all(struct dma_chan *chan) LIST_HEAD(head); spin_lock_irqsave(&idma64c->vchan.lock, flags); - idma64_chan_deactivate(idma64c); + idma64_chan_deactivate(idma64c, true); idma64_stop_transfer(idma64c); if (idma64c->desc) { idma64_vdesc_free(&idma64c->desc->vdesc); diff --git a/drivers/dma/idma64.h b/drivers/dma/idma64.h index a4d99685a..f6aeff0af 100644 --- a/drivers/dma/idma64.h +++ b/drivers/dma/idma64.h @@ -16,6 +16,8 @@ #include <linux/spinlock.h> #include <linux/types.h> +#include <asm-generic/io-64-nonatomic-lo-hi.h> + #include "virt-dma.h" /* Channel registers */ @@ -166,19 +168,13 @@ static inline void idma64c_writel(struct idma64_chan *idma64c, int offset, static inline u64 idma64c_readq(struct idma64_chan *idma64c, int offset) { - u64 l, h; - - l = idma64c_readl(idma64c, offset); - h = idma64c_readl(idma64c, offset + 4); - - return l | (h << 32); + return lo_hi_readq(idma64c->regs + offset); } static inline void idma64c_writeq(struct idma64_chan *idma64c, int offset, u64 value) { - 
idma64c_writel(idma64c, offset, value); - idma64c_writel(idma64c, offset + 4, value >> 32); + lo_hi_writeq(value, idma64c->regs + offset); } #define channel_readq(idma64c, reg) \ @@ -217,7 +213,7 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value) idma64_writel(idma64, IDMA64_##reg, (value)) /** - * struct idma64_chip - representation of DesignWare DMA controller hardware + * struct idma64_chip - representation of iDMA 64-bit controller hardware * @dev: struct device of the DMA controller * @irq: irq line * @regs: memory mapped I/O space diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c index 97eaa32d8..ff8f98e25 100644 --- a/drivers/dma/imx-sdma.c +++ b/drivers/dma/imx-sdma.c @@ -1462,7 +1462,7 @@ err_firmware: #define EVENT_REMAP_CELLS 3 -static int __init sdma_event_remap(struct sdma_engine *sdma) +static int sdma_event_remap(struct sdma_engine *sdma) { struct device_node *np = sdma->dev->of_node; struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0); @@ -1478,7 +1478,7 @@ static int __init sdma_event_remap(struct sdma_engine *sdma) event_remap = of_find_property(np, propname, NULL); num_map = event_remap ? (event_remap->length / sizeof(u32)) : 0; if (!num_map) { - dev_warn(sdma->dev, "no event needs to be remapped\n"); + dev_dbg(sdma->dev, "no event needs to be remapped\n"); goto out; } else if (num_map % EVENT_REMAP_CELLS) { dev_err(sdma->dev, "the property %s must modulo %d\n", @@ -1826,8 +1826,6 @@ static int sdma_probe(struct platform_device *pdev) of_node_put(spba_bus); } - dev_info(sdma->dev, "initialized\n"); - return 0; err_register: @@ -1852,7 +1850,6 @@ static int sdma_remove(struct platform_device *pdev) } platform_set_drvdata(pdev, NULL); - dev_info(&pdev->dev, "Removed...\n"); return 0; } diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index f66b7e640..1d5df2ef1 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -197,7 +197,8 @@ static void __ioat_start_null_desc(struct ioatdma_chan *ioat_chan) void ioat_start_null_desc(struct ioatdma_chan *ioat_chan) { spin_lock_bh(&ioat_chan->prep_lock); - __ioat_start_null_desc(ioat_chan); + if (!test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) + __ioat_start_null_desc(ioat_chan); spin_unlock_bh(&ioat_chan->prep_lock); } diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 1bc084986..8f4e607d5 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -82,8 +82,9 @@ struct ioatdma_device { struct dma_pool *sed_hw_pool[MAX_SED_POOLS]; struct dma_device dma_dev; u8 version; - struct msix_entry msix_entries[4]; - struct ioatdma_chan *idx[4]; +#define IOAT_MAX_CHANS 4 + struct msix_entry msix_entries[IOAT_MAX_CHANS]; + struct ioatdma_chan *idx[IOAT_MAX_CHANS]; struct dca_provider *dca; enum ioat_irq_mode irq_mode; u32 cap; @@ -95,6 +96,7 @@ struct ioatdma_chan { dma_addr_t last_completion; spinlock_t cleanup_lock; unsigned long state; + #define IOAT_CHAN_DOWN 0 #define IOAT_COMPLETION_ACK 1 #define IOAT_RESET_PENDING 2 #define IOAT_KOBJ_INIT_FAIL 3 diff --git a/drivers/dma/ioat/init.c b/drivers/dma/ioat/init.c index 1c3c9b0ab..4ef0c5e07 100644 --- a/drivers/dma/ioat/init.c +++ b/drivers/dma/ioat/init.c @@ -27,6 +27,7 @@ #include <linux/workqueue.h> #include <linux/prefetch.h> #include <linux/dca.h> +#include <linux/aer.h> #include "dma.h" #include "registers.h" #include "hw.h" @@ -1186,13 +1187,116 @@ static int ioat3_dma_probe(struct ioatdma_device *ioat_dma, int dca) return 0; } +static void ioat_shutdown(struct pci_dev *pdev) +{ + struct 
ioatdma_device *ioat_dma = pci_get_drvdata(pdev); + struct ioatdma_chan *ioat_chan; + int i; + + if (!ioat_dma) + return; + + for (i = 0; i < IOAT_MAX_CHANS; i++) { + ioat_chan = ioat_dma->idx[i]; + if (!ioat_chan) + continue; + + spin_lock_bh(&ioat_chan->prep_lock); + set_bit(IOAT_CHAN_DOWN, &ioat_chan->state); + del_timer_sync(&ioat_chan->timer); + spin_unlock_bh(&ioat_chan->prep_lock); + /* this should quiesce then reset */ + ioat_reset_hw(ioat_chan); + } + + ioat_disable_interrupts(ioat_dma); +} + +void ioat_resume(struct ioatdma_device *ioat_dma) +{ + struct ioatdma_chan *ioat_chan; + u32 chanerr; + int i; + + for (i = 0; i < IOAT_MAX_CHANS; i++) { + ioat_chan = ioat_dma->idx[i]; + if (!ioat_chan) + continue; + + spin_lock_bh(&ioat_chan->prep_lock); + clear_bit(IOAT_CHAN_DOWN, &ioat_chan->state); + spin_unlock_bh(&ioat_chan->prep_lock); + + chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + writel(chanerr, ioat_chan->reg_base + IOAT_CHANERR_OFFSET); + + /* no need to reset as shutdown already did that */ + } +} + #define DRV_NAME "ioatdma" +static pci_ers_result_t ioat_pcie_error_detected(struct pci_dev *pdev, + enum pci_channel_state error) +{ + dev_dbg(&pdev->dev, "%s: PCIe AER error %d\n", DRV_NAME, error); + + /* quiesce and block I/O */ + ioat_shutdown(pdev); + + return PCI_ERS_RESULT_NEED_RESET; +} + +static pci_ers_result_t ioat_pcie_error_slot_reset(struct pci_dev *pdev) +{ + pci_ers_result_t result = PCI_ERS_RESULT_RECOVERED; + int err; + + dev_dbg(&pdev->dev, "%s post reset handling\n", DRV_NAME); + + if (pci_enable_device_mem(pdev) < 0) { + dev_err(&pdev->dev, + "Failed to enable PCIe device after reset.\n"); + result = PCI_ERS_RESULT_DISCONNECT; + } else { + pci_set_master(pdev); + pci_restore_state(pdev); + pci_save_state(pdev); + pci_wake_from_d3(pdev, false); + } + + err = pci_cleanup_aer_uncorrect_error_status(pdev); + if (err) { + dev_err(&pdev->dev, + "AER uncorrect error status clear failed: %#x\n", err); + } + + return result; +} + +static void ioat_pcie_error_resume(struct pci_dev *pdev) +{ + struct ioatdma_device *ioat_dma = pci_get_drvdata(pdev); + + dev_dbg(&pdev->dev, "%s: AER handling resuming\n", DRV_NAME); + + /* initialize and bring everything back */ + ioat_resume(ioat_dma); +} + +static const struct pci_error_handlers ioat_err_handler = { + .error_detected = ioat_pcie_error_detected, + .slot_reset = ioat_pcie_error_slot_reset, + .resume = ioat_pcie_error_resume, +}; + static struct pci_driver ioat_pci_driver = { .name = DRV_NAME, .id_table = ioat_pci_tbl, .probe = ioat_pci_probe, .remove = ioat_remove, + .shutdown = ioat_shutdown, + .err_handler = &ioat_err_handler, }; static struct ioatdma_device * @@ -1245,13 +1349,17 @@ static int ioat_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) pci_set_drvdata(pdev, device); device->version = readb(device->reg_base + IOAT_VER_OFFSET); - if (device->version >= IOAT_VER_3_0) + if (device->version >= IOAT_VER_3_0) { err = ioat3_dma_probe(device, ioat_dca_enabled); - else + + if (device->version >= IOAT_VER_3_3) + pci_enable_pcie_error_reporting(pdev); + } else return -ENODEV; if (err) { dev_err(dev, "Intel(R) I/OAT DMA Engine init failed\n"); + pci_disable_pcie_error_reporting(pdev); return -ENODEV; } @@ -1271,6 +1379,8 @@ static void ioat_remove(struct pci_dev *pdev) free_dca_provider(device->dca); device->dca = NULL; } + + pci_disable_pcie_error_reporting(pdev); ioat_dma_remove(device); } diff --git a/drivers/dma/ioat/prep.c b/drivers/dma/ioat/prep.c index ad4fb41cd..6bb4a13a8 100644 --- 
a/drivers/dma/ioat/prep.c +++ b/drivers/dma/ioat/prep.c @@ -121,6 +121,9 @@ ioat_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, size_t total_len = len; int num_descs, idx, i; + if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) + return NULL; + num_descs = ioat_xferlen_to_descs(ioat_chan, len); if (likely(num_descs) && ioat_check_space_lock(ioat_chan, num_descs) == 0) @@ -254,6 +257,11 @@ struct dma_async_tx_descriptor * ioat_prep_xor(struct dma_chan *chan, dma_addr_t dest, dma_addr_t *src, unsigned int src_cnt, size_t len, unsigned long flags) { + struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); + + if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) + return NULL; + return __ioat_prep_xor_lock(chan, NULL, dest, src, src_cnt, len, flags); } @@ -262,6 +270,11 @@ ioat_prep_xor_val(struct dma_chan *chan, dma_addr_t *src, unsigned int src_cnt, size_t len, enum sum_check_flags *result, unsigned long flags) { + struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); + + if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) + return NULL; + /* the cleanup routine only sets bits on validate failure, it * does not clear bits on validate success... so clear it here */ @@ -574,6 +587,11 @@ ioat_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, unsigned long flags) { + struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); + + if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) + return NULL; + /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) dst[0] = dst[1]; @@ -614,6 +632,11 @@ ioat_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src, unsigned int src_cnt, const unsigned char *scf, size_t len, enum sum_check_flags *pqres, unsigned long flags) { + struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); + + if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) + return NULL; + /* specify valid address for disabled result */ if (flags & DMA_PREP_PQ_DISABLE_P) pq[0] = pq[1]; @@ -638,6 +661,10 @@ ioat_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src, { unsigned char scf[MAX_SCF]; dma_addr_t pq[2]; + struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); + + if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) + return NULL; if (src_cnt > MAX_SCF) return NULL; @@ -661,6 +688,10 @@ ioat_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src, { unsigned char scf[MAX_SCF]; dma_addr_t pq[2]; + struct ioatdma_chan *ioat_chan = to_ioat_chan(chan); + + if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) + return NULL; if (src_cnt > MAX_SCF) return NULL; @@ -689,6 +720,9 @@ ioat_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) struct ioat_ring_ent *desc; struct ioat_dma_descriptor *hw; + if (test_bit(IOAT_CHAN_DOWN, &ioat_chan->state)) + return NULL; + if (ioat_check_space_lock(ioat_chan, 1) == 0) desc = ioat_get_ring_ent(ioat_chan, ioat_chan->head); else diff --git a/drivers/dma/mic_x100_dma.c b/drivers/dma/mic_x100_dma.c index 74d9db05a..068e920ec 100644 --- a/drivers/dma/mic_x100_dma.c +++ b/drivers/dma/mic_x100_dma.c @@ -193,8 +193,16 @@ static void mic_dma_prog_intr(struct mic_dma_chan *ch) static int mic_dma_do_dma(struct mic_dma_chan *ch, int flags, dma_addr_t src, dma_addr_t dst, size_t len) { - if (-ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len)) + if (len && -ENOMEM == mic_dma_prog_memcpy_desc(ch, src, dst, len)) { return -ENOMEM; + } else { + /* 3 is the maximum number of status descriptors */ + int ret = mic_dma_avail_desc_ring_space(ch, 3); + + if (ret < 0) + 
return ret; + } + /* Above mic_dma_prog_memcpy_desc() makes sure we have enough space */ if (flags & DMA_PREP_FENCE) { mic_dma_prep_status_desc(&ch->desc_ring[ch->head], 0, @@ -270,6 +278,33 @@ allocate_tx(struct mic_dma_chan *ch) return tx; } +/* Program a status descriptor with dst as address and value to be written */ +static struct dma_async_tx_descriptor * +mic_dma_prep_status_lock(struct dma_chan *ch, dma_addr_t dst, u64 src_val, + unsigned long flags) +{ + struct mic_dma_chan *mic_ch = to_mic_dma_chan(ch); + int result; + + spin_lock(&mic_ch->prep_lock); + result = mic_dma_avail_desc_ring_space(mic_ch, 4); + if (result < 0) + goto error; + mic_dma_prep_status_desc(&mic_ch->desc_ring[mic_ch->head], src_val, dst, + false); + mic_dma_hw_ring_inc_head(mic_ch); + result = mic_dma_do_dma(mic_ch, flags, 0, 0, 0); + if (result < 0) + goto error; + + return allocate_tx(mic_ch); +error: + dev_err(mic_dma_ch_to_device(mic_ch), + "Error enqueueing dma status descriptor, error=%d\n", result); + spin_unlock(&mic_ch->prep_lock); + return NULL; +} + /* * Prepare a memcpy descriptor to be added to the ring. * Note that the temporary descriptor adds an extra overhead of copying the @@ -587,6 +622,8 @@ static int mic_dma_register_dma_device(struct mic_dma_device *mic_dma_dev, mic_dma_free_chan_resources; mic_dma_dev->dma_dev.device_tx_status = mic_dma_tx_status; mic_dma_dev->dma_dev.device_prep_dma_memcpy = mic_dma_prep_memcpy_lock; + mic_dma_dev->dma_dev.device_prep_dma_imm_data = + mic_dma_prep_status_lock; mic_dma_dev->dma_dev.device_prep_dma_interrupt = mic_dma_prep_interrupt_lock; mic_dma_dev->dma_dev.device_issue_pending = mic_dma_issue_pending; diff --git a/drivers/dma/moxart-dma.c b/drivers/dma/moxart-dma.c index b4634109e..631c4435e 100644 --- a/drivers/dma/moxart-dma.c +++ b/drivers/dma/moxart-dma.c @@ -652,6 +652,7 @@ static const struct of_device_id moxart_dma_match[] = { { .compatible = "moxa,moxart-dma" }, { } }; +MODULE_DEVICE_TABLE(of, moxart_dma_match); static struct platform_driver moxart_driver = { .probe = moxart_probe, diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c index e6281e7aa..aae76fb39 100644 --- a/drivers/dma/mpc512x_dma.c +++ b/drivers/dma/mpc512x_dma.c @@ -1073,6 +1073,7 @@ static const struct of_device_id mpc_dma_match[] = { { .compatible = "fsl,mpc8308-dma", }, {}, }; +MODULE_DEVICE_TABLE(of, mpc_dma_match); static struct platform_driver mpc_dma_driver = { .probe = mpc_dma_probe, diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c index 249445c8a..1dfc71c90 100644 --- a/drivers/dma/omap-dma.c +++ b/drivers/dma/omap-dma.c @@ -935,8 +935,12 @@ static struct dma_async_tx_descriptor *omap_dma_prep_dma_cyclic( else d->ccr |= CCR_SYNC_ELEMENT; - if (dir == DMA_DEV_TO_MEM) + if (dir == DMA_DEV_TO_MEM) { d->ccr |= CCR_TRIGGER_SRC; + d->csdp |= CSDP_DST_PACKED; + } else { + d->csdp |= CSDP_SRC_PACKED; + } d->cicr |= CICR_MISALIGNED_ERR_IE | CICR_TRANS_ERR_IE; diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c index ebd8a5f39..f1bcc2a16 100644 --- a/drivers/dma/sh/usb-dmac.c +++ b/drivers/dma/sh/usb-dmac.c @@ -679,8 +679,11 @@ static int usb_dmac_runtime_suspend(struct device *dev) struct usb_dmac *dmac = dev_get_drvdata(dev); int i; - for (i = 0; i < dmac->n_channels; ++i) + for (i = 0; i < dmac->n_channels; ++i) { + if (!dmac->channels[i].iomem) + break; usb_dmac_chan_halt(&dmac->channels[i]); + } return 0; } @@ -799,11 +802,10 @@ static int usb_dmac_probe(struct platform_device *pdev) ret = pm_runtime_get_sync(&pdev->dev); if (ret < 
0) { dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret); - return ret; + goto error_pm; } ret = usb_dmac_init(dmac); - pm_runtime_put(&pdev->dev); if (ret) { dev_err(&pdev->dev, "failed to reset device\n"); @@ -851,10 +853,13 @@ static int usb_dmac_probe(struct platform_device *pdev) if (ret < 0) goto error; + pm_runtime_put(&pdev->dev); return 0; error: of_dma_controller_free(pdev->dev.of_node); + pm_runtime_put(&pdev->dev); +error_pm: pm_runtime_disable(&pdev->dev); return ret; } diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c index 7d5598d87..22ea2419e 100644 --- a/drivers/dma/sirf-dma.c +++ b/drivers/dma/sirf-dma.c @@ -1149,6 +1149,7 @@ static const struct of_device_id sirfsoc_dma_match[] = { { .compatible = "sirf,atlas7-dmac-v2", .data = &sirfsoc_dmadata_a7v2,}, {}, }; +MODULE_DEVICE_TABLE(of, sirfsoc_dma_match); static struct platform_driver sirfsoc_dma_driver = { .probe = sirfsoc_dma_probe, diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c index 750d1b313..dd3e7ba27 100644 --- a/drivers/dma/ste_dma40.c +++ b/drivers/dma/ste_dma40.c @@ -2907,7 +2907,7 @@ static int __init d40_dmaengine_init(struct d40_base *base, if (err) { d40_err(base->dev, - "Failed to regsiter memcpy only channels\n"); + "Failed to register memcpy only channels\n"); goto failure2; } diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c index 73e0be6e2..2db12e493 100644 --- a/drivers/dma/sun6i-dma.c +++ b/drivers/dma/sun6i-dma.c @@ -908,6 +908,7 @@ static const struct of_device_id sun6i_dma_match[] = { { .compatible = "allwinner,sun8i-h3-dma", .data = &sun8i_h3_dma_cfg }, { /* sentinel */ } }; +MODULE_DEVICE_TABLE(of, sun6i_dma_match); static int sun6i_dma_probe(struct platform_device *pdev) { diff --git a/drivers/dma/ti-dma-crossbar.c b/drivers/dma/ti-dma-crossbar.c index 5cce8c9d0..a415edbe6 100644 --- a/drivers/dma/ti-dma-crossbar.c +++ b/drivers/dma/ti-dma-crossbar.c @@ -17,13 +17,184 @@ #include <linux/of_device.h> #include <linux/of_dma.h> -#define TI_XBAR_OUTPUTS 127 -#define TI_XBAR_INPUTS 256 +#define TI_XBAR_DRA7 0 +#define TI_XBAR_AM335X 1 + +static const struct of_device_id ti_dma_xbar_match[] = { + { + .compatible = "ti,dra7-dma-crossbar", + .data = (void *)TI_XBAR_DRA7, + }, + { + .compatible = "ti,am335x-edma-crossbar", + .data = (void *)TI_XBAR_AM335X, + }, + {}, +}; + +/* Crossbar on AM335x/AM437x family */ +#define TI_AM335X_XBAR_LINES 64 + +struct ti_am335x_xbar_data { + void __iomem *iomem; + + struct dma_router dmarouter; + + u32 xbar_events; /* maximum number of events to select in xbar */ + u32 dma_requests; /* number of DMA requests on eDMA */ +}; + +struct ti_am335x_xbar_map { + u16 dma_line; + u16 mux_val; +}; + +static inline void ti_am335x_xbar_write(void __iomem *iomem, int event, u16 val) +{ + writeb_relaxed(val & 0x1f, iomem + event); +} + +static void ti_am335x_xbar_free(struct device *dev, void *route_data) +{ + struct ti_am335x_xbar_data *xbar = dev_get_drvdata(dev); + struct ti_am335x_xbar_map *map = route_data; + + dev_dbg(dev, "Unmapping XBAR event %u on channel %u\n", + map->mux_val, map->dma_line); + + ti_am335x_xbar_write(xbar->iomem, map->dma_line, 0); + kfree(map); +} + +static void *ti_am335x_xbar_route_allocate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) +{ + struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); + struct ti_am335x_xbar_data *xbar = platform_get_drvdata(pdev); + struct ti_am335x_xbar_map *map; + + if (dma_spec->args_count != 3) + return ERR_PTR(-EINVAL); + + if (dma_spec->args[2] 
>= xbar->xbar_events) { + dev_err(&pdev->dev, "Invalid XBAR event number: %d\n", + dma_spec->args[2]); + return ERR_PTR(-EINVAL); + } + + if (dma_spec->args[0] >= xbar->dma_requests) { + dev_err(&pdev->dev, "Invalid DMA request line number: %d\n", + dma_spec->args[0]); + return ERR_PTR(-EINVAL); + } + + /* The of_node_put() will be done in the core for the node */ + dma_spec->np = of_parse_phandle(ofdma->of_node, "dma-masters", 0); + if (!dma_spec->np) { + dev_err(&pdev->dev, "Can't get DMA master\n"); + return ERR_PTR(-EINVAL); + } + + map = kzalloc(sizeof(*map), GFP_KERNEL); + if (!map) { + of_node_put(dma_spec->np); + return ERR_PTR(-ENOMEM); + } + + map->dma_line = (u16)dma_spec->args[0]; + map->mux_val = (u16)dma_spec->args[2]; + + dma_spec->args[2] = 0; + dma_spec->args_count = 2; + + dev_dbg(&pdev->dev, "Mapping XBAR event%u to DMA%u\n", + map->mux_val, map->dma_line); + + ti_am335x_xbar_write(xbar->iomem, map->dma_line, map->mux_val); + + return map; +} + +static const struct of_device_id ti_am335x_master_match[] = { + { .compatible = "ti,edma3-tpcc", }, + {}, +}; + +static int ti_am335x_xbar_probe(struct platform_device *pdev) +{ + struct device_node *node = pdev->dev.of_node; + const struct of_device_id *match; + struct device_node *dma_node; + struct ti_am335x_xbar_data *xbar; + struct resource *res; + void __iomem *iomem; + int i, ret; + + if (!node) + return -ENODEV; + + xbar = devm_kzalloc(&pdev->dev, sizeof(*xbar), GFP_KERNEL); + if (!xbar) + return -ENOMEM; + + dma_node = of_parse_phandle(node, "dma-masters", 0); + if (!dma_node) { + dev_err(&pdev->dev, "Can't get DMA master node\n"); + return -ENODEV; + } + + match = of_match_node(ti_am335x_master_match, dma_node); + if (!match) { + dev_err(&pdev->dev, "DMA master is not supported\n"); + return -EINVAL; + } + + if (of_property_read_u32(dma_node, "dma-requests", + &xbar->dma_requests)) { + dev_info(&pdev->dev, + "Missing XBAR output information, using %u.\n", + TI_AM335X_XBAR_LINES); + xbar->dma_requests = TI_AM335X_XBAR_LINES; + } + of_node_put(dma_node); + + if (of_property_read_u32(node, "dma-requests", &xbar->xbar_events)) { + dev_info(&pdev->dev, + "Missing XBAR input information, using %u.\n", + TI_AM335X_XBAR_LINES); + xbar->xbar_events = TI_AM335X_XBAR_LINES; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + iomem = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(iomem)) + return PTR_ERR(iomem); + + xbar->iomem = iomem; + + xbar->dmarouter.dev = &pdev->dev; + xbar->dmarouter.route_free = ti_am335x_xbar_free; + + platform_set_drvdata(pdev, xbar); + + /* Reset the crossbar */ + for (i = 0; i < xbar->dma_requests; i++) + ti_am335x_xbar_write(xbar->iomem, i, 0); + + ret = of_dma_router_register(node, ti_am335x_xbar_route_allocate, + &xbar->dmarouter); + + return ret; +} + +/* Crossbar on DRA7xx family */ +#define TI_DRA7_XBAR_OUTPUTS 127 +#define TI_DRA7_XBAR_INPUTS 256 #define TI_XBAR_EDMA_OFFSET 0 #define TI_XBAR_SDMA_OFFSET 1 -struct ti_dma_xbar_data { +struct ti_dra7_xbar_data { void __iomem *iomem; struct dma_router dmarouter; @@ -35,35 +206,35 @@ struct ti_dma_xbar_data { u32 dma_offset; }; -struct ti_dma_xbar_map { +struct ti_dra7_xbar_map { u16 xbar_in; int xbar_out; }; -static inline void ti_dma_xbar_write(void __iomem *iomem, int xbar, u16 val) +static inline void ti_dra7_xbar_write(void __iomem *iomem, int xbar, u16 val) { writew_relaxed(val, iomem + (xbar * 2)); } -static void ti_dma_xbar_free(struct device *dev, void *route_data) +static void ti_dra7_xbar_free(struct device *dev, void 
*route_data) { - struct ti_dma_xbar_data *xbar = dev_get_drvdata(dev); - struct ti_dma_xbar_map *map = route_data; + struct ti_dra7_xbar_data *xbar = dev_get_drvdata(dev); + struct ti_dra7_xbar_map *map = route_data; dev_dbg(dev, "Unmapping XBAR%u (was routed to %d)\n", map->xbar_in, map->xbar_out); - ti_dma_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); + ti_dra7_xbar_write(xbar->iomem, map->xbar_out, xbar->safe_val); idr_remove(&xbar->map_idr, map->xbar_out); kfree(map); } -static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec, - struct of_dma *ofdma) +static void *ti_dra7_xbar_route_allocate(struct of_phandle_args *dma_spec, + struct of_dma *ofdma) { struct platform_device *pdev = of_find_device_by_node(ofdma->of_node); - struct ti_dma_xbar_data *xbar = platform_get_drvdata(pdev); - struct ti_dma_xbar_map *map; + struct ti_dra7_xbar_data *xbar = platform_get_drvdata(pdev); + struct ti_dra7_xbar_map *map; if (dma_spec->args[0] >= xbar->xbar_requests) { dev_err(&pdev->dev, "Invalid XBAR request number: %d\n", @@ -93,12 +264,12 @@ static void *ti_dma_xbar_route_allocate(struct of_phandle_args *dma_spec, dev_dbg(&pdev->dev, "Mapping XBAR%u to DMA%d\n", map->xbar_in, map->xbar_out); - ti_dma_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in); + ti_dra7_xbar_write(xbar->iomem, map->xbar_out, map->xbar_in); return map; } -static const struct of_device_id ti_dma_master_match[] = { +static const struct of_device_id ti_dra7_master_match[] = { { .compatible = "ti,omap4430-sdma", .data = (void *)TI_XBAR_SDMA_OFFSET, @@ -110,12 +281,12 @@ static const struct of_device_id ti_dma_master_match[] = { {}, }; -static int ti_dma_xbar_probe(struct platform_device *pdev) +static int ti_dra7_xbar_probe(struct platform_device *pdev) { struct device_node *node = pdev->dev.of_node; const struct of_device_id *match; struct device_node *dma_node; - struct ti_dma_xbar_data *xbar; + struct ti_dra7_xbar_data *xbar; struct resource *res; u32 safe_val; void __iomem *iomem; @@ -136,7 +307,7 @@ static int ti_dma_xbar_probe(struct platform_device *pdev) return -ENODEV; } - match = of_match_node(ti_dma_master_match, dma_node); + match = of_match_node(ti_dra7_master_match, dma_node); if (!match) { dev_err(&pdev->dev, "DMA master is not supported\n"); return -EINVAL; @@ -146,16 +317,16 @@ static int ti_dma_xbar_probe(struct platform_device *pdev) &xbar->dma_requests)) { dev_info(&pdev->dev, "Missing XBAR output information, using %u.\n", - TI_XBAR_OUTPUTS); - xbar->dma_requests = TI_XBAR_OUTPUTS; + TI_DRA7_XBAR_OUTPUTS); + xbar->dma_requests = TI_DRA7_XBAR_OUTPUTS; } of_node_put(dma_node); if (of_property_read_u32(node, "dma-requests", &xbar->xbar_requests)) { dev_info(&pdev->dev, "Missing XBAR input information, using %u.\n", - TI_XBAR_INPUTS); - xbar->xbar_requests = TI_XBAR_INPUTS; + TI_DRA7_XBAR_INPUTS); + xbar->xbar_requests = TI_DRA7_XBAR_INPUTS; } if (!of_property_read_u32(node, "ti,dma-safe-map", &safe_val)) @@ -169,30 +340,50 @@ static int ti_dma_xbar_probe(struct platform_device *pdev) xbar->iomem = iomem; xbar->dmarouter.dev = &pdev->dev; - xbar->dmarouter.route_free = ti_dma_xbar_free; + xbar->dmarouter.route_free = ti_dra7_xbar_free; xbar->dma_offset = (u32)match->data; platform_set_drvdata(pdev, xbar); /* Reset the crossbar */ for (i = 0; i < xbar->dma_requests; i++) - ti_dma_xbar_write(xbar->iomem, i, xbar->safe_val); + ti_dra7_xbar_write(xbar->iomem, i, xbar->safe_val); - ret = of_dma_router_register(node, ti_dma_xbar_route_allocate, + ret = of_dma_router_register(node, 
ti_dra7_xbar_route_allocate, &xbar->dmarouter); if (ret) { /* Restore the defaults for the crossbar */ for (i = 0; i < xbar->dma_requests; i++) - ti_dma_xbar_write(xbar->iomem, i, i); + ti_dra7_xbar_write(xbar->iomem, i, i); } return ret; } -static const struct of_device_id ti_dma_xbar_match[] = { - { .compatible = "ti,dra7-dma-crossbar" }, - {}, -}; +static int ti_dma_xbar_probe(struct platform_device *pdev) +{ + const struct of_device_id *match; + int ret; + + match = of_match_node(ti_dma_xbar_match, pdev->dev.of_node); + if (unlikely(!match)) + return -EINVAL; + + switch ((u32)match->data) { + case TI_XBAR_DRA7: + ret = ti_dra7_xbar_probe(pdev); + break; + case TI_XBAR_AM335X: + ret = ti_am335x_xbar_probe(pdev); + break; + default: + dev_err(&pdev->dev, "Unsupported crossbar\n"); + ret = -ENODEV; + break; + } + + return ret; +} static struct platform_driver ti_dma_xbar_driver = { .driver = { diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h index 181b95267..2fa47745a 100644 --- a/drivers/dma/virt-dma.h +++ b/drivers/dma/virt-dma.h @@ -47,9 +47,9 @@ struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t); /** * vchan_tx_prep - prepare a descriptor - * vc: virtual channel allocating this descriptor - * vd: virtual descriptor to prepare - * tx_flags: flags argument passed in to prepare function + * @vc: virtual channel allocating this descriptor + * @vd: virtual descriptor to prepare + * @tx_flags: flags argument passed in to prepare function */ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd, unsigned long tx_flags) @@ -65,7 +65,7 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan /** * vchan_issue_pending - move submitted descriptors to issued list - * vc: virtual channel to update + * @vc: virtual channel to update * * vc.lock must be held by caller */ @@ -77,7 +77,7 @@ static inline bool vchan_issue_pending(struct virt_dma_chan *vc) /** * vchan_cookie_complete - report completion of a descriptor - * vd: virtual descriptor to update + * @vd: virtual descriptor to update * * vc.lock must be held by caller */ @@ -97,7 +97,7 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd) /** * vchan_cyclic_callback - report the completion of a period - * vd: virtual descriptor + * @vd: virtual descriptor */ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) { @@ -109,7 +109,7 @@ static inline void vchan_cyclic_callback(struct virt_dma_desc *vd) /** * vchan_next_desc - peek at the next descriptor to be processed - * vc: virtual channel to obtain descriptor from + * @vc: virtual channel to obtain descriptor from * * vc.lock must be held by caller */ @@ -123,8 +123,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) /** * vchan_get_all_descriptors - obtain all submitted and issued descriptors - * vc: virtual channel to get descriptors from - * head: list of descriptors found + * @vc: virtual channel to get descriptors from + * @head: list of descriptors found * * vc.lock must be held by caller * diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c index 8d57b1b12..9cb93c5b6 100644 --- a/drivers/dma/xgene-dma.c +++ b/drivers/dma/xgene-dma.c @@ -29,6 +29,7 @@ #include <linux/dmapool.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/irq.h> #include <linux/module.h> #include <linux/of_device.h> @@ -547,14 +548,12 @@ static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor( struct 
xgene_dma_desc_sw *desc; dma_addr_t phys; - desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys); + desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys); if (!desc) { chan_err(chan, "Failed to allocate LDs\n"); return NULL; } - memset(desc, 0, sizeof(*desc)); - INIT_LIST_HEAD(&desc->tx_list); desc->tx.phys = phys; desc->tx.tx_submit = xgene_dma_tx_submit; @@ -894,60 +893,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan) chan->desc_pool = NULL; } -static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy( - struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src, - size_t len, unsigned long flags) -{ - struct xgene_dma_desc_sw *first = NULL, *new; - struct xgene_dma_chan *chan; - size_t copy; - - if (unlikely(!dchan || !len)) - return NULL; - - chan = to_dma_chan(dchan); - - do { - /* Allocate the link descriptor from DMA pool */ - new = xgene_dma_alloc_descriptor(chan); - if (!new) - goto fail; - - /* Create the largest transaction possible */ - copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT); - - /* Prepare DMA descriptor */ - xgene_dma_prep_cpy_desc(chan, new, dst, src, copy); - - if (!first) - first = new; - - new->tx.cookie = 0; - async_tx_ack(&new->tx); - - /* Update metadata */ - len -= copy; - dst += copy; - src += copy; - - /* Insert the link descriptor to the LD ring */ - list_add_tail(&new->node, &first->tx_list); - } while (len); - - new->tx.flags = flags; /* client is in control of this ack */ - new->tx.cookie = -EBUSY; - list_splice(&first->tx_list, &new->tx_list); - - return &new->tx; - -fail: - if (!first) - return NULL; - - xgene_dma_free_desc_list(chan, &first->tx_list); - return NULL; -} - static struct dma_async_tx_descriptor *xgene_dma_prep_sg( struct dma_chan *dchan, struct scatterlist *dst_sg, u32 dst_nents, struct scatterlist *src_sg, @@ -1666,6 +1611,7 @@ static int xgene_dma_request_irqs(struct xgene_dma *pdma) /* Register DMA channel rx irq */ for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { chan = &pdma->chan[i]; + irq_set_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); ret = devm_request_irq(chan->dev, chan->rx_irq, xgene_dma_chan_ring_isr, 0, chan->name, chan); @@ -1676,6 +1622,7 @@ static int xgene_dma_request_irqs(struct xgene_dma *pdma) for (j = 0; j < i; j++) { chan = &pdma->chan[i]; + irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); devm_free_irq(chan->dev, chan->rx_irq, chan); } @@ -1696,6 +1643,7 @@ static void xgene_dma_free_irqs(struct xgene_dma *pdma) for (i = 0; i < XGENE_DMA_MAX_CHANNEL; i++) { chan = &pdma->chan[i]; + irq_clear_status_flags(chan->rx_irq, IRQ_DISABLE_UNLAZY); devm_free_irq(chan->dev, chan->rx_irq, chan); } } @@ -1707,7 +1655,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan, dma_cap_zero(dma_dev->cap_mask); /* Set DMA device capability */ - dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask); dma_cap_set(DMA_SG, dma_dev->cap_mask); /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR @@ -1734,7 +1681,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan, dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; dma_dev->device_issue_pending = xgene_dma_issue_pending; dma_dev->device_tx_status = xgene_dma_tx_status; - dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy; dma_dev->device_prep_dma_sg = xgene_dma_prep_sg; if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { @@ -1787,8 +1733,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id) /* DMA capability info */ dev_info(pdma->dev, - "%s: CAPABILITY ( %s%s%s%s)\n", 
dma_chan_name(&chan->dma_chan), - dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "", + "%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan), dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "", dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c index d8434d465..6f4b5017c 100644 --- a/drivers/dma/xilinx/xilinx_vdma.c +++ b/drivers/dma/xilinx/xilinx_vdma.c @@ -1349,6 +1349,7 @@ static const struct of_device_id xilinx_vdma_of_ids[] = { { .compatible = "xlnx,axi-vdma-1.00.a",}, {} }; +MODULE_DEVICE_TABLE(of, xilinx_vdma_of_ids); static struct platform_driver xilinx_vdma_driver = { .driver = { diff --git a/drivers/dma/zx296702_dma.c b/drivers/dma/zx296702_dma.c index c017fcd8e..245d759d5 100644 --- a/drivers/dma/zx296702_dma.c +++ b/drivers/dma/zx296702_dma.c @@ -441,7 +441,7 @@ static struct zx_dma_desc_sw *zx_alloc_desc_resource(int num, kfree(ds); return NULL; } - memset(ds->desc_hw, sizeof(struct zx_desc_hw) * num, 0); + memset(ds->desc_hw, 0, sizeof(struct zx_desc_hw) * num); ds->desc_num = num; return ds; }
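
Several of the hunks above share one pattern: a single platform driver matches more than one device-tree compatible, carries a per-SoC identifier in of_device_id.data (the ti-dma-crossbar probe dispatching between its DRA7 and AM335x back ends), and exports its match table with MODULE_DEVICE_TABLE() so the module can be autoloaded from the firmware compatible string (the sirf-dma, sun6i-dma and xilinx_vdma additions). The sketch below is not part of the patch; it only illustrates that pattern under assumed names — the "acme,foo-*" compatibles and every foo_* identifier are hypothetical.

/*
 * Minimal multi-variant platform driver sketch (hypothetical names).
 * One driver, two compatibles, per-variant data dispatched at probe time.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#define FOO_VARIANT_A	0
#define FOO_VARIANT_B	1

static const struct of_device_id foo_match[] = {
	{ .compatible = "acme,foo-a", .data = (void *)FOO_VARIANT_A },
	{ .compatible = "acme,foo-b", .data = (void *)FOO_VARIANT_B },
	{ /* sentinel */ },
};
/* Exporting the table lets userspace autoload the module from the compatible */
MODULE_DEVICE_TABLE(of, foo_match);

static int foo_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;

	match = of_match_node(foo_match, pdev->dev.of_node);
	if (!match)
		return -EINVAL;

	switch ((unsigned long)match->data) {
	case FOO_VARIANT_A:
		/* variant-A specific setup would go here */
		return 0;
	case FOO_VARIANT_B:
		/* variant-B specific setup would go here */
		return 0;
	default:
		dev_err(&pdev->dev, "Unsupported variant\n");
		return -ENODEV;
	}
}

static struct platform_driver foo_driver = {
	.probe	= foo_probe,
	.driver	= {
		.name		= "foo",
		.of_match_table	= foo_match,
	},
};
module_platform_driver(foo_driver);

MODULE_LICENSE("GPL");

The same structure scales to further variants without duplicating the platform_driver boilerplate: only the match table and the probe-time switch grow, which is essentially what the ti-dma-crossbar change does when it splits the old probe into ti_dra7_xbar_probe() and ti_am335x_xbar_probe().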