From d0b2f91bede3bd5e3d24dd6803e56eee959c1797 Mon Sep 17 00:00:00 2001 From: André Fabian Silva Delgado Date: Thu, 20 Oct 2016 00:10:27 -0300 Subject: Linux-libre 4.8.2-gnu --- drivers/hsi/Makefile | 3 +- drivers/hsi/clients/cmt_speech.c | 4 +- drivers/hsi/clients/ssi_protocol.c | 110 +++-- drivers/hsi/controllers/omap_ssi.h | 18 +- drivers/hsi/controllers/omap_ssi_core.c | 46 +- drivers/hsi/controllers/omap_ssi_port.c | 226 +++++---- drivers/hsi/hsi_core.c | 781 ++++++++++++++++++++++++++++++++ 7 files changed, 1009 insertions(+), 179 deletions(-) create mode 100644 drivers/hsi/hsi_core.c (limited to 'drivers/hsi') diff --git a/drivers/hsi/Makefile b/drivers/hsi/Makefile index 360371e13..96944783d 100644 --- a/drivers/hsi/Makefile +++ b/drivers/hsi/Makefile @@ -1,7 +1,8 @@ # # Makefile for HSI # -obj-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o obj-$(CONFIG_HSI) += hsi.o +hsi-objs := hsi_core.o +hsi-$(CONFIG_HSI_BOARDINFO) += hsi_boardinfo.o obj-y += controllers/ obj-y += clients/ diff --git a/drivers/hsi/clients/cmt_speech.c b/drivers/hsi/clients/cmt_speech.c index 95638df73..3deef6cc7 100644 --- a/drivers/hsi/clients/cmt_speech.c +++ b/drivers/hsi/clients/cmt_speech.c @@ -444,8 +444,8 @@ static void cs_hsi_read_on_control_complete(struct hsi_msg *msg) hi->control_state &= ~SSI_CHANNEL_STATE_READING; if (msg->status == HSI_STATUS_ERROR) { dev_err(&hi->cl->device, "Control RX error detected\n"); - cs_hsi_control_read_error(hi, msg); spin_unlock(&hi->lock); + cs_hsi_control_read_error(hi, msg); goto out; } dev_dbg(&hi->cl->device, "Read on control: %08X\n", cmd); @@ -1275,7 +1275,7 @@ static int cs_char_mmap(struct file *file, struct vm_area_struct *vma) if (vma->vm_end < vma->vm_start) return -EINVAL; - if (((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) != 1) + if (vma_pages(vma) != 1) return -EINVAL; vma->vm_flags |= VM_IO | VM_DONTDUMP | VM_DONTEXPAND; diff --git a/drivers/hsi/clients/ssi_protocol.c b/drivers/hsi/clients/ssi_protocol.c index 6595d2091..6031cd146 100644 --- a/drivers/hsi/clients/ssi_protocol.c +++ b/drivers/hsi/clients/ssi_protocol.c @@ -88,6 +88,8 @@ void ssi_waketest(struct hsi_client *cl, unsigned int enable); #define SSIP_READY_CMD SSIP_CMD(SSIP_READY, 0) #define SSIP_SWBREAK_CMD SSIP_CMD(SSIP_SW_BREAK, 0) +#define SSIP_WAKETEST_FLAG 0 + /* Main state machine states */ enum { INIT, @@ -116,7 +118,7 @@ enum { * @main_state: Main state machine * @send_state: TX state machine * @recv_state: RX state machine - * @waketest: Flag to follow wake line test + * @flags: Flags, currently only used to follow wake line test * @rxid: RX data id * @txid: TX data id * @txqueue_len: TX queue length @@ -137,7 +139,7 @@ struct ssi_protocol { unsigned int main_state; unsigned int send_state; unsigned int recv_state; - unsigned int waketest:1; + unsigned long flags; u8 rxid; u8 txid; unsigned int txqueue_len; @@ -148,6 +150,7 @@ struct ssi_protocol { struct net_device *netdev; struct list_head txqueue; struct list_head cmdqueue; + struct work_struct work; struct hsi_client *cl; struct list_head link; atomic_t tx_usecnt; @@ -405,15 +408,17 @@ static void ssip_reset(struct hsi_client *cl) spin_lock_bh(&ssi->lock); if (ssi->send_state != SEND_IDLE) hsi_stop_tx(cl); - if (ssi->waketest) - ssi_waketest(cl, 0); + spin_unlock_bh(&ssi->lock); + if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) + ssi_waketest(cl, 0); /* FIXME: To be removed */ + spin_lock_bh(&ssi->lock); del_timer(&ssi->rx_wd); del_timer(&ssi->tx_wd); del_timer(&ssi->keep_alive); ssi->main_state = 0; ssi->send_state = 0; 
ssi->recv_state = 0; - ssi->waketest = 0; + ssi->flags = 0; ssi->rxid = 0; ssi->txid = 0; list_for_each_safe(head, tmp, &ssi->txqueue) { @@ -437,7 +442,8 @@ static void ssip_dump_state(struct hsi_client *cl) dev_err(&cl->device, "Send state: %d\n", ssi->send_state); dev_err(&cl->device, "CMT %s\n", (ssi->main_state == ACTIVE) ? "Online" : "Offline"); - dev_err(&cl->device, "Wake test %d\n", ssi->waketest); + dev_err(&cl->device, "Wake test %d\n", + test_bit(SSIP_WAKETEST_FLAG, &ssi->flags)); dev_err(&cl->device, "Data RX id: %d\n", ssi->rxid); dev_err(&cl->device, "Data TX id: %d\n", ssi->txid); @@ -515,17 +521,17 @@ static void ssip_start_rx(struct hsi_client *cl) dev_dbg(&cl->device, "RX start M(%d) R(%d)\n", ssi->main_state, ssi->recv_state); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); /* * We can have two UP events in a row due to a short low * high transition. Therefore we need to ignore the sencond UP event. */ if ((ssi->main_state != ACTIVE) || (ssi->recv_state == RECV_READY)) { - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); return; } ssip_set_rxstate(ssi, RECV_READY); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); msg = ssip_claim_cmd(ssi); ssip_set_cmd(msg, SSIP_READY_CMD); @@ -539,10 +545,10 @@ static void ssip_stop_rx(struct hsi_client *cl) struct ssi_protocol *ssi = hsi_client_drvdata(cl); dev_dbg(&cl->device, "RX stop M(%d)\n", ssi->main_state); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (likely(ssi->main_state == ACTIVE)) ssip_set_rxstate(ssi, RECV_IDLE); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); } static void ssip_free_strans(struct hsi_msg *msg) @@ -559,9 +565,9 @@ static void ssip_strans_complete(struct hsi_msg *msg) data = msg->context; ssip_release_cmd(msg); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); ssip_set_txstate(ssi, SENDING); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); hsi_async_write(cl, data); } @@ -666,15 +672,17 @@ static void ssip_rx_bootinforeq(struct hsi_client *cl, u32 cmd) /* Fall through */ case INIT: case HANDSHAKE: - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); ssi->main_state = HANDSHAKE; - if (!ssi->waketest) { - ssi->waketest = 1; + spin_unlock_bh(&ssi->lock); + + if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) ssi_waketest(cl, 1); /* FIXME: To be removed */ - } + + spin_lock_bh(&ssi->lock); /* Start boot handshake watchdog */ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); dev_dbg(&cl->device, "Send BOOTINFO_RESP\n"); if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) dev_warn(&cl->device, "boot info req verid mismatch\n"); @@ -696,14 +704,14 @@ static void ssip_rx_bootinforesp(struct hsi_client *cl, u32 cmd) if (SSIP_DATA_VERSION(cmd) != SSIP_LOCAL_VERID) dev_warn(&cl->device, "boot info resp verid mismatch\n"); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (ssi->main_state != ACTIVE) /* Use tx_wd as a boot watchdog in non ACTIVE state */ mod_timer(&ssi->tx_wd, jiffies + msecs_to_jiffies(SSIP_WDTOUT)); else dev_dbg(&cl->device, "boot info resp ignored M(%d)\n", ssi->main_state); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); } static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd) @@ -711,20 +719,22 @@ static void ssip_rx_waketest(struct hsi_client *cl, u32 cmd) struct ssi_protocol *ssi = hsi_client_drvdata(cl); unsigned int wkres = SSIP_PAYLOAD(cmd); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (ssi->main_state != HANDSHAKE) { dev_dbg(&cl->device, 
"wake lines test ignored M(%d)\n", ssi->main_state); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); return; } - if (ssi->waketest) { - ssi->waketest = 0; + spin_unlock_bh(&ssi->lock); + + if (test_and_clear_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) ssi_waketest(cl, 0); /* FIXME: To be removed */ - } + + spin_lock_bh(&ssi->lock); ssi->main_state = ACTIVE; del_timer(&ssi->tx_wd); /* Stop boot handshake timer */ - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); dev_notice(&cl->device, "WAKELINES TEST %s\n", wkres & SSIP_WAKETEST_FAILED ? "FAILED" : "OK"); @@ -741,20 +751,20 @@ static void ssip_rx_ready(struct hsi_client *cl) { struct ssi_protocol *ssi = hsi_client_drvdata(cl); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (unlikely(ssi->main_state != ACTIVE)) { dev_dbg(&cl->device, "READY on wrong state: S(%d) M(%d)\n", ssi->send_state, ssi->main_state); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); return; } if (ssi->send_state != WAIT4READY) { dev_dbg(&cl->device, "Ignore spurious READY command\n"); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); return; } ssip_set_txstate(ssi, SEND_READY); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); ssip_xmit(cl); } @@ -766,22 +776,22 @@ static void ssip_rx_strans(struct hsi_client *cl, u32 cmd) int len = SSIP_PDU_LENGTH(cmd); dev_dbg(&cl->device, "RX strans: %d frames\n", len); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (unlikely(ssi->main_state != ACTIVE)) { dev_err(&cl->device, "START TRANS wrong state: S(%d) M(%d)\n", ssi->send_state, ssi->main_state); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); return; } ssip_set_rxstate(ssi, RECEIVING); if (unlikely(SSIP_MSG_ID(cmd) != ssi->rxid)) { dev_err(&cl->device, "START TRANS id %d expected %d\n", SSIP_MSG_ID(cmd), ssi->rxid); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); goto out1; } ssi->rxid++; - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); skb = netdev_alloc_skb(ssi->netdev, len * 4); if (unlikely(!skb)) { dev_err(&cl->device, "No memory for rx skb\n"); @@ -849,7 +859,7 @@ static void ssip_swbreak_complete(struct hsi_msg *msg) struct ssi_protocol *ssi = hsi_client_drvdata(cl); ssip_release_cmd(msg); - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (list_empty(&ssi->txqueue)) { if (atomic_read(&ssi->tx_usecnt)) { ssip_set_txstate(ssi, SEND_READY); @@ -857,9 +867,9 @@ static void ssip_swbreak_complete(struct hsi_msg *msg) ssip_set_txstate(ssi, SEND_IDLE); hsi_stop_tx(cl); } - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); } else { - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); ssip_xmit(cl); } netif_wake_queue(ssi->netdev); @@ -876,17 +886,17 @@ static void ssip_tx_data_complete(struct hsi_msg *msg) ssip_error(cl); goto out; } - spin_lock(&ssi->lock); + spin_lock_bh(&ssi->lock); if (list_empty(&ssi->txqueue)) { ssip_set_txstate(ssi, SENDING_SWBREAK); - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); cmsg = ssip_claim_cmd(ssi); ssip_set_cmd(cmsg, SSIP_SWBREAK_CMD); cmsg->complete = ssip_swbreak_complete; dev_dbg(&cl->device, "Send SWBREAK\n"); hsi_async_write(cl, cmsg); } else { - spin_unlock(&ssi->lock); + spin_unlock_bh(&ssi->lock); ssip_xmit(cl); } out: @@ -926,11 +936,11 @@ static int ssip_pn_open(struct net_device *dev) } dev_dbg(&cl->device, "Configuring SSI port\n"); hsi_setup(cl); - spin_lock_bh(&ssi->lock); - if (!ssi->waketest) { - ssi->waketest = 1; + + if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags)) ssi_waketest(cl, 1); /* FIXME: To be removed */ - } + + 
spin_lock_bh(&ssi->lock); ssi->main_state = HANDSHAKE; spin_unlock_bh(&ssi->lock); @@ -959,6 +969,15 @@ static int ssip_pn_set_mtu(struct net_device *dev, int new_mtu) return 0; } +static void ssip_xmit_work(struct work_struct *work) +{ + struct ssi_protocol *ssi = + container_of(work, struct ssi_protocol, work); + struct hsi_client *cl = ssi->cl; + + ssip_xmit(cl); +} + static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) { struct hsi_client *cl = to_hsi_client(dev->dev.parent); @@ -1011,7 +1030,7 @@ static int ssip_pn_xmit(struct sk_buff *skb, struct net_device *dev) dev_dbg(&cl->device, "Start TX on SEND READY qlen %d\n", ssi->txqueue_len); spin_unlock_bh(&ssi->lock); - ssip_xmit(cl); + schedule_work(&ssi->work); } else { spin_unlock_bh(&ssi->lock); } @@ -1088,6 +1107,7 @@ static int ssi_protocol_probe(struct device *dev) atomic_set(&ssi->tx_usecnt, 0); hsi_client_set_drvdata(cl, ssi); ssi->cl = cl; + INIT_WORK(&ssi->work, ssip_xmit_work); ssi->channel_id_cmd = hsi_get_channel_id_by_name(cl, "mcsaab-control"); if (ssi->channel_id_cmd < 0) { diff --git a/drivers/hsi/controllers/omap_ssi.h b/drivers/hsi/controllers/omap_ssi.h index 7b4dec2c6..32ced0c8f 100644 --- a/drivers/hsi/controllers/omap_ssi.h +++ b/drivers/hsi/controllers/omap_ssi.h @@ -35,6 +35,8 @@ #define SSI_MAX_GDD_LCH 8 #define SSI_BYTES_TO_FRAMES(x) ((((x) - 1) >> 2) + 1) +#define SSI_WAKE_EN 0 + /** * struct omap_ssm_ctx - OMAP synchronous serial module (TX/RX) context * @mode: Bit transmission mode @@ -71,13 +73,14 @@ struct omap_ssm_ctx { * @txqueue: TX message queues * @rxqueue: RX message queues * @brkqueue: Queue of incoming HWBREAK requests (FRAME mode) + * @errqueue: Queue for failed messages + * @errqueue_work: Delayed Work for failed messages * @irq: IRQ number * @wake_irq: IRQ number for incoming wake line (-1 if none) * @wake_gpio: GPIO number for incoming wake line (-1 if none) - * @pio_tasklet: Bottom half for PIO transfers and events - * @wake_tasklet: Bottom half for incoming wake events - * @wkin_cken: Keep track of clock references due to the incoming wake line + * @flags: flags to keep track of states * @wk_refcount: Reference count for output wake line + * @work: worker for starting TX * @sys_mpu_enable: Context for the interrupt enable register for irq 0 * @sst: Context for the synchronous serial transmitter * @ssr: Context for the synchronous serial receiver @@ -95,14 +98,15 @@ struct omap_ssi_port { struct list_head txqueue[SSI_MAX_CHANNELS]; struct list_head rxqueue[SSI_MAX_CHANNELS]; struct list_head brkqueue; + struct list_head errqueue; + struct delayed_work errqueue_work; unsigned int irq; int wake_irq; struct gpio_desc *wake_gpio; - struct tasklet_struct pio_tasklet; - struct tasklet_struct wake_tasklet; bool wktest:1; /* FIXME: HACK to be removed */ - bool wkin_cken:1; /* Workaround */ + unsigned long flags; unsigned int wk_refcount; + struct work_struct work; /* OMAP SSI port context */ u32 sys_mpu_enable; /* We use only one irq */ struct omap_ssm_ctx sst; @@ -138,7 +142,6 @@ struct gdd_trn { * @fck_rate: clock rate * @loss_count: To follow if we need to restore context or not * @max_speed: Maximum TX speed (Kb/s) set by the clients. 
- * @sysconfig: SSI controller saved context * @gdd_gcr: SSI GDD saved context * @get_loss: Pointer to omap_pm_get_dev_context_loss_count, if any * @port: Array of pointers of the ports of the controller @@ -158,7 +161,6 @@ struct omap_ssi_controller { u32 loss_count; u32 max_speed; /* OMAP SSI Controller context */ - u32 sysconfig; u32 gdd_gcr; int (*get_loss)(struct device *dev); struct omap_ssi_port **port; diff --git a/drivers/hsi/controllers/omap_ssi_core.c b/drivers/hsi/controllers/omap_ssi_core.c index a3e0febfb..9a29b34ed 100644 --- a/drivers/hsi/controllers/omap_ssi_core.c +++ b/drivers/hsi/controllers/omap_ssi_core.c @@ -58,7 +58,7 @@ static int ssi_debug_show(struct seq_file *m, void *p __maybe_unused) seq_printf(m, "REVISION\t: 0x%08x\n", readl(sys + SSI_REVISION_REG)); seq_printf(m, "SYSCONFIG\t: 0x%08x\n", readl(sys + SSI_SYSCONFIG_REG)); seq_printf(m, "SYSSTATUS\t: 0x%08x\n", readl(sys + SSI_SYSSTATUS_REG)); - pm_runtime_put_sync(ssi->device.parent); + pm_runtime_put(ssi->device.parent); return 0; } @@ -112,7 +112,7 @@ static int ssi_debug_gdd_show(struct seq_file *m, void *p __maybe_unused) readw(gdd + SSI_GDD_CLNK_CTRL_REG(lch))); } - pm_runtime_put_sync(ssi->device.parent); + pm_runtime_put(ssi->device.parent); return 0; } @@ -193,7 +193,7 @@ void ssi_waketest(struct hsi_client *cl, unsigned int enable) } else { writel_relaxed(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); - pm_runtime_put_sync(ssi->device.parent); + pm_runtime_put(ssi->device.parent); } } EXPORT_SYMBOL_GPL(ssi_waketest); @@ -217,7 +217,7 @@ static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch) if (msg->ttype == HSI_MSG_READ) { dir = DMA_FROM_DEVICE; val = SSI_DATAAVAILABLE(msg->channel); - pm_runtime_put_sync(ssi->device.parent); + pm_runtime_put(omap_port->pdev); } else { dir = DMA_TO_DEVICE; val = SSI_DATAACCEPT(msg->channel); @@ -235,7 +235,9 @@ static void ssi_gdd_complete(struct hsi_controller *ssi, unsigned int lch) spin_lock(&omap_port->lock); list_del(&msg->link); /* Dequeue msg */ spin_unlock(&omap_port->lock); - msg->complete(msg); + + list_add_tail(&msg->link, &omap_port->errqueue); + schedule_delayed_work(&omap_port->errqueue_work, 0); return; } spin_lock(&omap_port->lock); @@ -255,7 +257,13 @@ static void ssi_gdd_tasklet(unsigned long dev) unsigned int lch; u32 status_reg; - pm_runtime_get_sync(ssi->device.parent); + pm_runtime_get(ssi->device.parent); + + if (!pm_runtime_active(ssi->device.parent)) { + dev_warn(ssi->device.parent, "ssi_gdd_tasklet called without runtime PM!\n"); + pm_runtime_put(ssi->device.parent); + return; + } status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); for (lch = 0; lch < SSI_MAX_GDD_LCH; lch++) { @@ -265,7 +273,7 @@ static void ssi_gdd_tasklet(unsigned long dev) writel_relaxed(status_reg, sys + SSI_GDD_MPU_IRQ_STATUS_REG); status_reg = readl(sys + SSI_GDD_MPU_IRQ_STATUS_REG); - pm_runtime_put_sync(ssi->device.parent); + pm_runtime_put(ssi->device.parent); if (status_reg) tasklet_hi_schedule(&omap_ssi->gdd_tasklet); @@ -312,7 +320,7 @@ static int ssi_clk_event(struct notifier_block *nb, unsigned long event, continue; /* Workaround for SWBREAK + CAwake down race in CMT */ - tasklet_disable(&omap_port->wake_tasklet); + disable_irq(omap_port->wake_irq); /* stop all ssi communication */ pinctrl_pm_select_idle_state(omap_port->pdev); @@ -338,7 +346,7 @@ static int ssi_clk_event(struct notifier_block *nb, unsigned long event, /* resume ssi communication */ pinctrl_pm_select_default_state(omap_port->pdev); - 
tasklet_enable(&omap_port->wake_tasklet); + enable_irq(omap_port->wake_irq); } break; @@ -452,8 +460,6 @@ out_err: static int ssi_hw_init(struct hsi_controller *ssi) { struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); - unsigned int i; - u32 val; int err; err = pm_runtime_get_sync(ssi->device.parent); @@ -461,27 +467,12 @@ static int ssi_hw_init(struct hsi_controller *ssi) dev_err(&ssi->device, "runtime PM failed %d\n", err); return err; } - /* Reseting SSI controller */ - writel_relaxed(SSI_SOFTRESET, omap_ssi->sys + SSI_SYSCONFIG_REG); - val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG); - for (i = 0; ((i < 20) && !(val & SSI_RESETDONE)); i++) { - msleep(20); - val = readl(omap_ssi->sys + SSI_SYSSTATUS_REG); - } - if (!(val & SSI_RESETDONE)) { - dev_err(&ssi->device, "SSI HW reset failed\n"); - pm_runtime_put_sync(ssi->device.parent); - return -EIO; - } /* Reseting GDD */ writel_relaxed(SSI_SWRESET, omap_ssi->gdd + SSI_GDD_GRST_REG); /* Get FCK rate in KHz */ omap_ssi->fck_rate = DIV_ROUND_CLOSEST(ssi_get_clk_rate(ssi), 1000); dev_dbg(&ssi->device, "SSI fck rate %lu KHz\n", omap_ssi->fck_rate); - /* Set default PM settings */ - val = SSI_AUTOIDLE | SSI_SIDLEMODE_SMART | SSI_MIDLEMODE_SMART; - writel_relaxed(val, omap_ssi->sys + SSI_SYSCONFIG_REG); - omap_ssi->sysconfig = val; + writel_relaxed(SSI_CLK_AUTOGATING_ON, omap_ssi->sys + SSI_GDD_GCR_REG); omap_ssi->gdd_gcr = SSI_CLK_AUTOGATING_ON; pm_runtime_put_sync(ssi->device.parent); @@ -552,7 +543,6 @@ static int ssi_probe(struct platform_device *pd) if (err < 0) goto out1; - pm_runtime_irq_safe(&pd->dev); pm_runtime_enable(&pd->dev); err = ssi_hw_init(ssi); diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c index 6b8f77397..7765de2f1 100644 --- a/drivers/hsi/controllers/omap_ssi_port.c +++ b/drivers/hsi/controllers/omap_ssi_port.c @@ -126,7 +126,7 @@ static int ssi_debug_port_show(struct seq_file *m, void *p __maybe_unused) seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch, readl(base + SSI_SSR_BUFFER_CH_REG(ch))); } - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return 0; } @@ -150,7 +150,7 @@ static int ssi_div_get(void *data, u64 *val) pm_runtime_get_sync(omap_port->pdev); *val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG); - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return 0; } @@ -166,7 +166,7 @@ static int ssi_div_set(void *data, u64 val) pm_runtime_get_sync(omap_port->pdev); writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG); omap_port->sst.divisor = val; - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return 0; } @@ -193,6 +193,21 @@ static int ssi_debug_add_port(struct omap_ssi_port *omap_port, } #endif +static void ssi_process_errqueue(struct work_struct *work) +{ + struct omap_ssi_port *omap_port; + struct list_head *head, *tmp; + struct hsi_msg *msg; + + omap_port = container_of(work, struct omap_ssi_port, errqueue_work.work); + + list_for_each_safe(head, tmp, &omap_port->errqueue) { + msg = list_entry(head, struct hsi_msg, link); + msg->complete(msg); + list_del(head); + } +} + static int ssi_claim_lch(struct hsi_msg *msg) { @@ -225,11 +240,21 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch) u32 d_addr; u32 tmp; + /* Hold clocks during the transfer */ + pm_runtime_get(omap_port->pdev); + + if (!pm_runtime_active(omap_port->pdev)) { + dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n"); + 
pm_runtime_put_autosuspend(omap_port->pdev); + return -EREMOTEIO; + } + if (msg->ttype == HSI_MSG_READ) { err = dma_map_sg(&ssi->device, msg->sgt.sgl, msg->sgt.nents, DMA_FROM_DEVICE); if (err < 0) { dev_dbg(&ssi->device, "DMA map SG failed !\n"); + pm_runtime_put_autosuspend(omap_port->pdev); return err; } csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT | @@ -246,6 +271,7 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch) DMA_TO_DEVICE); if (err < 0) { dev_dbg(&ssi->device, "DMA map SG failed !\n"); + pm_runtime_put_autosuspend(omap_port->pdev); return err; } csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT | @@ -261,9 +287,6 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch) dev_dbg(&ssi->device, "lch %d cdsp %08x ccr %04x s_addr %08x d_addr %08x\n", lch, csdp, ccr, s_addr, d_addr); - /* Hold clocks during the transfer */ - pm_runtime_get_sync(omap_port->pdev); - writew_relaxed(csdp, gdd + SSI_GDD_CSDP_REG(lch)); writew_relaxed(SSI_BLOCK_IE | SSI_TOUT_IE, gdd + SSI_GDD_CICR_REG(lch)); writel_relaxed(d_addr, gdd + SSI_GDD_CDSA_REG(lch)); @@ -290,11 +313,18 @@ static int ssi_start_pio(struct hsi_msg *msg) struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); u32 val; - pm_runtime_get_sync(omap_port->pdev); + pm_runtime_get(omap_port->pdev); + + if (!pm_runtime_active(omap_port->pdev)) { + dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n"); + pm_runtime_put_autosuspend(omap_port->pdev); + return -EREMOTEIO; + } + if (msg->ttype == HSI_MSG_WRITE) { val = SSI_DATAACCEPT(msg->channel); /* Hold clocks for pio writes */ - pm_runtime_get_sync(omap_port->pdev); + pm_runtime_get(omap_port->pdev); } else { val = SSI_DATAAVAILABLE(msg->channel) | SSI_ERROROCCURED; } @@ -302,7 +332,7 @@ static int ssi_start_pio(struct hsi_msg *msg) msg->ttype ? 
"write" : "read"); val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); msg->actual_len = 0; msg->status = HSI_STATUS_PROCEEDING; @@ -360,7 +390,8 @@ static int ssi_async_break(struct hsi_msg *msg) spin_unlock_bh(&omap_port->lock); } out: - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return err; } @@ -388,6 +419,8 @@ static int ssi_async(struct hsi_msg *msg) queue = &omap_port->rxqueue[msg->channel]; } msg->status = HSI_STATUS_QUEUED; + + pm_runtime_get_sync(omap_port->pdev); spin_lock_bh(&omap_port->lock); list_add_tail(&msg->link, queue); err = ssi_start_transfer(queue); @@ -396,6 +429,8 @@ static int ssi_async(struct hsi_msg *msg) msg->status = HSI_STATUS_ERROR; } spin_unlock_bh(&omap_port->lock); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); dev_dbg(&port->device, "msg status %d ttype %d ch %d\n", msg->status, msg->ttype, msg->channel); @@ -497,7 +532,8 @@ static int ssi_setup(struct hsi_client *cl) omap_port->ssr.mode = cl->rx_cfg.mode; out: spin_unlock_bh(&omap_port->lock); - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return err; } @@ -528,7 +564,7 @@ static int ssi_flush(struct hsi_client *cl) continue; writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i)); if (msg->ttype == HSI_MSG_READ) - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); omap_ssi->gdd_trn[i].msg = NULL; } /* Flush all SST buffers */ @@ -552,7 +588,7 @@ static int ssi_flush(struct hsi_client *cl) for (i = 0; i < omap_port->channels; i++) { /* Release write clocks */ if (!list_empty(&omap_port->txqueue[i])) - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); ssi_flush_queue(&omap_port->txqueue[i], NULL); ssi_flush_queue(&omap_port->rxqueue[i], NULL); } @@ -562,17 +598,28 @@ static int ssi_flush(struct hsi_client *cl) pinctrl_pm_select_default_state(omap_port->pdev); spin_unlock_bh(&omap_port->lock); - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return 0; } +static void start_tx_work(struct work_struct *work) +{ + struct omap_ssi_port *omap_port = + container_of(work, struct omap_ssi_port, work); + struct hsi_port *port = to_hsi_port(omap_port->dev); + struct hsi_controller *ssi = to_hsi_controller(port->device.parent); + struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); + + pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */ + writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); +} + static int ssi_start_tx(struct hsi_client *cl) { struct hsi_port *port = hsi_get_port(cl); struct omap_ssi_port *omap_port = hsi_port_drvdata(port); - struct hsi_controller *ssi = to_hsi_controller(port->device.parent); - struct omap_ssi_controller *omap_ssi = hsi_controller_drvdata(ssi); dev_dbg(&port->device, "Wake out high %d\n", omap_port->wk_refcount); @@ -581,10 +628,10 @@ static int ssi_start_tx(struct hsi_client *cl) spin_unlock_bh(&omap_port->wk_lock); return 0; } - pm_runtime_get_sync(omap_port->pdev); /* Grab clocks */ - writel(SSI_WAKE(0), omap_ssi->sys + SSI_SET_WAKE_REG(port->num)); spin_unlock_bh(&omap_port->wk_lock); + schedule_work(&omap_port->work); + 
return 0; } @@ -604,9 +651,12 @@ static int ssi_stop_tx(struct hsi_client *cl) return 0; } writel(SSI_WAKE(0), omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); - pm_runtime_put_sync(omap_port->pdev); /* Release clocks */ spin_unlock_bh(&omap_port->wk_lock); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */ + + return 0; } @@ -616,6 +666,7 @@ static void ssi_transfer(struct omap_ssi_port *omap_port, struct hsi_msg *msg; int err = -1; + pm_runtime_get(omap_port->pdev); spin_lock_bh(&omap_port->lock); while (err < 0) { err = ssi_start_transfer(queue); @@ -630,6 +681,8 @@ static void ssi_transfer(struct omap_ssi_port *omap_port, } } spin_unlock_bh(&omap_port->lock); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); } static void ssi_cleanup_queues(struct hsi_client *cl) @@ -658,7 +711,8 @@ static void ssi_cleanup_queues(struct hsi_client *cl) txbufstate |= (1 << i); status |= SSI_DATAACCEPT(i); /* Release the clocks writes, also GDD ones */ - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); } ssi_flush_queue(&omap_port->txqueue[i], cl); } @@ -712,8 +766,10 @@ static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl) * Clock references for write will be handled in * ssi_cleanup_queues */ - if (msg->ttype == HSI_MSG_READ) - pm_runtime_put_sync(omap_port->pdev); + if (msg->ttype == HSI_MSG_READ) { + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); + } omap_ssi->gdd_trn[i].msg = NULL; } tmp = readl_relaxed(omap_ssi->sys + SSI_GDD_MPU_IRQ_ENABLE_REG); @@ -738,32 +794,30 @@ static int ssi_release(struct hsi_client *cl) struct omap_ssi_port *omap_port = hsi_port_drvdata(port); struct hsi_controller *ssi = to_hsi_controller(port->device.parent); - spin_lock_bh(&omap_port->lock); pm_runtime_get_sync(omap_port->pdev); + spin_lock_bh(&omap_port->lock); /* Stop all the pending DMA requests for that client */ ssi_cleanup_gdd(ssi, cl); /* Now cleanup all the queues */ ssi_cleanup_queues(cl); - pm_runtime_put_sync(omap_port->pdev); /* If it is the last client of the port, do extra checks and cleanup */ if (port->claimed <= 1) { /* * Drop the clock reference for the incoming wake line * if it is still kept high by the other side. 
*/ - if (omap_port->wkin_cken) { + if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) pm_runtime_put_sync(omap_port->pdev); - omap_port->wkin_cken = 0; - } - pm_runtime_get_sync(omap_port->pdev); + pm_runtime_get(omap_port->pdev); /* Stop any SSI TX/RX without a client */ ssi_set_port_mode(omap_port, SSI_MODE_SLEEP); omap_port->sst.mode = SSI_MODE_SLEEP; omap_port->ssr.mode = SSI_MODE_SLEEP; - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_put(omap_port->pdev); WARN_ON(omap_port->wk_refcount != 0); } spin_unlock_bh(&omap_port->lock); + pm_runtime_put_sync(omap_port->pdev); return 0; } @@ -868,7 +922,7 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) u32 reg; u32 val; - spin_lock(&omap_port->lock); + spin_lock_bh(&omap_port->lock); msg = list_first_entry(queue, struct hsi_msg, link); if ((!msg->sgt.nents) || (!msg->sgt.sgl->length)) { msg->actual_len = 0; @@ -900,7 +954,7 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) (msg->ttype == HSI_MSG_WRITE))) { writel(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); - spin_unlock(&omap_port->lock); + spin_unlock_bh(&omap_port->lock); return; } @@ -910,18 +964,19 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue) reg = readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); if (msg->ttype == HSI_MSG_WRITE) { /* Release clocks for write transfer */ - pm_runtime_put_sync(omap_port->pdev); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); } reg &= ~val; writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0)); writel_relaxed(val, omap_ssi->sys + SSI_MPU_STATUS_REG(port->num, 0)); list_del(&msg->link); - spin_unlock(&omap_port->lock); + spin_unlock_bh(&omap_port->lock); msg->complete(msg); ssi_transfer(omap_port, queue); } -static void ssi_pio_tasklet(unsigned long ssi_port) +static irqreturn_t ssi_pio_thread(int irq, void *ssi_port) { struct hsi_port *port = (struct hsi_port *)ssi_port; struct hsi_controller *ssi = to_hsi_controller(port->device.parent); @@ -932,41 +987,35 @@ static void ssi_pio_tasklet(unsigned long ssi_port) u32 status_reg; pm_runtime_get_sync(omap_port->pdev); - status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); - status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); - for (ch = 0; ch < omap_port->channels; ch++) { - if (status_reg & SSI_DATAACCEPT(ch)) - ssi_pio_complete(port, &omap_port->txqueue[ch]); - if (status_reg & SSI_DATAAVAILABLE(ch)) - ssi_pio_complete(port, &omap_port->rxqueue[ch]); - } - if (status_reg & SSI_BREAKDETECTED) - ssi_break_complete(port); - if (status_reg & SSI_ERROROCCURED) - ssi_error(port); + do { + status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); + status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); - status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); - status_reg &= readl(sys + SSI_MPU_ENABLE_REG(port->num, 0)); - pm_runtime_put_sync(omap_port->pdev); + for (ch = 0; ch < omap_port->channels; ch++) { + if (status_reg & SSI_DATAACCEPT(ch)) + ssi_pio_complete(port, &omap_port->txqueue[ch]); + if (status_reg & SSI_DATAAVAILABLE(ch)) + ssi_pio_complete(port, &omap_port->rxqueue[ch]); + } + if (status_reg & SSI_BREAKDETECTED) + ssi_break_complete(port); + if (status_reg & SSI_ERROROCCURED) + ssi_error(port); - if (status_reg) - tasklet_hi_schedule(&omap_port->pio_tasklet); - else - enable_irq(omap_port->irq); -} + status_reg = readl(sys + SSI_MPU_STATUS_REG(port->num, 0)); + status_reg &= readl(sys + 
SSI_MPU_ENABLE_REG(port->num, 0)); -static irqreturn_t ssi_pio_isr(int irq, void *port) -{ - struct omap_ssi_port *omap_port = hsi_port_drvdata(port); + /* TODO: sleep if we retry? */ + } while (status_reg); - tasklet_hi_schedule(&omap_port->pio_tasklet); - disable_irq_nosync(irq); + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); return IRQ_HANDLED; } -static void ssi_wake_tasklet(unsigned long ssi_port) +static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port) { struct hsi_port *port = (struct hsi_port *)ssi_port; struct hsi_controller *ssi = to_hsi_controller(port->device.parent); @@ -981,12 +1030,8 @@ static void ssi_wake_tasklet(unsigned long ssi_port) * This workaround will avoid breaking the clock reference * count when such a situation ocurrs. */ - spin_lock(&omap_port->lock); - if (!omap_port->wkin_cken) { - omap_port->wkin_cken = 1; + if (!test_and_set_bit(SSI_WAKE_EN, &omap_port->flags)) pm_runtime_get_sync(omap_port->pdev); - } - spin_unlock(&omap_port->lock); dev_dbg(&ssi->device, "Wake in high\n"); if (omap_port->wktest) { /* FIXME: HACK ! To be removed */ writel(SSI_WAKE(0), @@ -1000,26 +1045,16 @@ static void ssi_wake_tasklet(unsigned long ssi_port) omap_ssi->sys + SSI_CLEAR_WAKE_REG(port->num)); } hsi_event(port, HSI_EVENT_STOP_RX); - spin_lock(&omap_port->lock); - if (omap_port->wkin_cken) { - pm_runtime_put_sync(omap_port->pdev); - omap_port->wkin_cken = 0; + if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) { + pm_runtime_mark_last_busy(omap_port->pdev); + pm_runtime_put_autosuspend(omap_port->pdev); } - spin_unlock(&omap_port->lock); } -} - -static irqreturn_t ssi_wake_isr(int irq __maybe_unused, void *ssi_port) -{ - struct omap_ssi_port *omap_port = hsi_port_drvdata(ssi_port); - - tasklet_hi_schedule(&omap_port->wake_tasklet); return IRQ_HANDLED; } -static int ssi_port_irq(struct hsi_port *port, - struct platform_device *pd) +static int ssi_port_irq(struct hsi_port *port, struct platform_device *pd) { struct omap_ssi_port *omap_port = hsi_port_drvdata(port); int err; @@ -1030,18 +1065,15 @@ static int ssi_port_irq(struct hsi_port *port, return err; } omap_port->irq = err; - tasklet_init(&omap_port->pio_tasklet, ssi_pio_tasklet, - (unsigned long)port); - err = devm_request_irq(&port->device, omap_port->irq, ssi_pio_isr, - 0, "mpu_irq0", port); + err = devm_request_threaded_irq(&port->device, omap_port->irq, NULL, + ssi_pio_thread, IRQF_ONESHOT, "SSI PORT", port); if (err < 0) dev_err(&port->device, "Request IRQ %d failed (%d)\n", omap_port->irq, err); return err; } -static int ssi_wake_irq(struct hsi_port *port, - struct platform_device *pd) +static int ssi_wake_irq(struct hsi_port *port, struct platform_device *pd) { struct omap_ssi_port *omap_port = hsi_port_drvdata(port); int cawake_irq; @@ -1053,13 +1085,12 @@ static int ssi_wake_irq(struct hsi_port *port, } cawake_irq = gpiod_to_irq(omap_port->wake_gpio); - omap_port->wake_irq = cawake_irq; - tasklet_init(&omap_port->wake_tasklet, ssi_wake_tasklet, - (unsigned long)port); - err = devm_request_irq(&port->device, cawake_irq, ssi_wake_isr, - IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, - "cawake", port); + + err = devm_request_threaded_irq(&port->device, cawake_irq, NULL, + ssi_wake_thread, + IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, + "SSI cawake", port); if (err < 0) dev_err(&port->device, "Request Wake in IRQ %d failed %d\n", cawake_irq, err); @@ -1169,6 +1200,9 @@ static int ssi_port_probe(struct platform_device *pd) 
omap_port->pdev = &pd->dev; omap_port->port_id = port_id; + INIT_DEFERRABLE_WORK(&omap_port->errqueue_work, ssi_process_errqueue); + INIT_WORK(&omap_port->work, start_tx_work); + /* initialize HSI port */ port->async = ssi_async; port->setup = ssi_setup; @@ -1202,7 +1236,8 @@ static int ssi_port_probe(struct platform_device *pd) spin_lock_init(&omap_port->wk_lock); omap_port->dev = &port->device; - pm_runtime_irq_safe(omap_port->pdev); + pm_runtime_use_autosuspend(omap_port->pdev); + pm_runtime_set_autosuspend_delay(omap_port->pdev, 250); pm_runtime_enable(omap_port->pdev); #ifdef CONFIG_DEBUG_FS @@ -1234,10 +1269,9 @@ static int ssi_port_remove(struct platform_device *pd) ssi_debug_remove_port(port); #endif - hsi_port_unregister_clients(port); + cancel_delayed_work_sync(&omap_port->errqueue_work); - tasklet_kill(&omap_port->wake_tasklet); - tasklet_kill(&omap_port->pio_tasklet); + hsi_port_unregister_clients(port); port->async = hsi_dummy_msg; port->setup = hsi_dummy_cl; @@ -1248,6 +1282,8 @@ static int ssi_port_remove(struct platform_device *pd) omap_ssi->port[omap_port->port_id] = NULL; platform_set_drvdata(pd, NULL); + + pm_runtime_dont_use_autosuspend(&pd->dev); pm_runtime_disable(&pd->dev); return 0; diff --git a/drivers/hsi/hsi_core.c b/drivers/hsi/hsi_core.c new file mode 100644 index 000000000..c2a2a9795 --- /dev/null +++ b/drivers/hsi/hsi_core.c @@ -0,0 +1,781 @@ +/* + * HSI core. + * + * Copyright (C) 2010 Nokia Corporation. All rights reserved. + * + * Contact: Carlos Chinea + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA + * 02110-1301 USA + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hsi_core.h" + +static ssize_t modalias_show(struct device *dev, + struct device_attribute *a __maybe_unused, char *buf) +{ + return sprintf(buf, "hsi:%s\n", dev_name(dev)); +} +static DEVICE_ATTR_RO(modalias); + +static struct attribute *hsi_bus_dev_attrs[] = { + &dev_attr_modalias.attr, + NULL, +}; +ATTRIBUTE_GROUPS(hsi_bus_dev); + +static int hsi_bus_uevent(struct device *dev, struct kobj_uevent_env *env) +{ + add_uevent_var(env, "MODALIAS=hsi:%s", dev_name(dev)); + + return 0; +} + +static int hsi_bus_match(struct device *dev, struct device_driver *driver) +{ + if (of_driver_match_device(dev, driver)) + return true; + + if (strcmp(dev_name(dev), driver->name) == 0) + return true; + + return false; +} + +static struct bus_type hsi_bus_type = { + .name = "hsi", + .dev_groups = hsi_bus_dev_groups, + .match = hsi_bus_match, + .uevent = hsi_bus_uevent, +}; + +static void hsi_client_release(struct device *dev) +{ + struct hsi_client *cl = to_hsi_client(dev); + + kfree(cl->tx_cfg.channels); + kfree(cl->rx_cfg.channels); + kfree(cl); +} + +struct hsi_client *hsi_new_client(struct hsi_port *port, + struct hsi_board_info *info) +{ + struct hsi_client *cl; + size_t size; + + cl = kzalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + goto err; + + cl->tx_cfg = info->tx_cfg; + if (cl->tx_cfg.channels) { + size = cl->tx_cfg.num_channels * sizeof(*cl->tx_cfg.channels); + cl->tx_cfg.channels = kmemdup(info->tx_cfg.channels, size, + GFP_KERNEL); + if (!cl->tx_cfg.channels) + goto err_tx; + } + + cl->rx_cfg = info->rx_cfg; + if (cl->rx_cfg.channels) { + size = cl->rx_cfg.num_channels * sizeof(*cl->rx_cfg.channels); + cl->rx_cfg.channels = kmemdup(info->rx_cfg.channels, size, + GFP_KERNEL); + if (!cl->rx_cfg.channels) + goto err_rx; + } + + cl->device.bus = &hsi_bus_type; + cl->device.parent = &port->device; + cl->device.release = hsi_client_release; + dev_set_name(&cl->device, "%s", info->name); + cl->device.platform_data = info->platform_data; + if (info->archdata) + cl->device.archdata = *info->archdata; + if (device_register(&cl->device) < 0) { + pr_err("hsi: failed to register client: %s\n", info->name); + put_device(&cl->device); + } + + return cl; +err_rx: + kfree(cl->tx_cfg.channels); +err_tx: + kfree(cl); +err: + return NULL; +} +EXPORT_SYMBOL_GPL(hsi_new_client); + +static void hsi_scan_board_info(struct hsi_controller *hsi) +{ + struct hsi_cl_info *cl_info; + struct hsi_port *p; + + list_for_each_entry(cl_info, &hsi_board_list, list) + if (cl_info->info.hsi_id == hsi->id) { + p = hsi_find_port_num(hsi, cl_info->info.port); + if (!p) + continue; + hsi_new_client(p, &cl_info->info); + } +} + +#ifdef CONFIG_OF +static struct hsi_board_info hsi_char_dev_info = { + .name = "hsi_char", +}; + +static int hsi_of_property_parse_mode(struct device_node *client, char *name, + unsigned int *result) +{ + const char *mode; + int err; + + err = of_property_read_string(client, name, &mode); + if (err < 0) + return err; + + if (strcmp(mode, "stream") == 0) + *result = HSI_MODE_STREAM; + else if (strcmp(mode, "frame") == 0) + *result = HSI_MODE_FRAME; + else + return -EINVAL; + + return 0; +} + +static int hsi_of_property_parse_flow(struct device_node *client, char *name, + unsigned int *result) +{ + 
const char *flow; + int err; + + err = of_property_read_string(client, name, &flow); + if (err < 0) + return err; + + if (strcmp(flow, "synchronized") == 0) + *result = HSI_FLOW_SYNC; + else if (strcmp(flow, "pipeline") == 0) + *result = HSI_FLOW_PIPE; + else + return -EINVAL; + + return 0; +} + +static int hsi_of_property_parse_arb_mode(struct device_node *client, + char *name, unsigned int *result) +{ + const char *arb_mode; + int err; + + err = of_property_read_string(client, name, &arb_mode); + if (err < 0) + return err; + + if (strcmp(arb_mode, "round-robin") == 0) + *result = HSI_ARB_RR; + else if (strcmp(arb_mode, "priority") == 0) + *result = HSI_ARB_PRIO; + else + return -EINVAL; + + return 0; +} + +static void hsi_add_client_from_dt(struct hsi_port *port, + struct device_node *client) +{ + struct hsi_client *cl; + struct hsi_channel channel; + struct property *prop; + char name[32]; + int length, cells, err, i, max_chan, mode; + + cl = kzalloc(sizeof(*cl), GFP_KERNEL); + if (!cl) + return; + + err = of_modalias_node(client, name, sizeof(name)); + if (err) + goto err; + + dev_set_name(&cl->device, "%s", name); + + err = hsi_of_property_parse_mode(client, "hsi-mode", &mode); + if (err) { + err = hsi_of_property_parse_mode(client, "hsi-rx-mode", + &cl->rx_cfg.mode); + if (err) + goto err; + + err = hsi_of_property_parse_mode(client, "hsi-tx-mode", + &cl->tx_cfg.mode); + if (err) + goto err; + } else { + cl->rx_cfg.mode = mode; + cl->tx_cfg.mode = mode; + } + + err = of_property_read_u32(client, "hsi-speed-kbps", + &cl->tx_cfg.speed); + if (err) + goto err; + cl->rx_cfg.speed = cl->tx_cfg.speed; + + err = hsi_of_property_parse_flow(client, "hsi-flow", + &cl->rx_cfg.flow); + if (err) + goto err; + + err = hsi_of_property_parse_arb_mode(client, "hsi-arb-mode", + &cl->rx_cfg.arb_mode); + if (err) + goto err; + + prop = of_find_property(client, "hsi-channel-ids", &length); + if (!prop) { + err = -EINVAL; + goto err; + } + + cells = length / sizeof(u32); + + cl->rx_cfg.num_channels = cells; + cl->tx_cfg.num_channels = cells; + + cl->rx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL); + if (!cl->rx_cfg.channels) { + err = -ENOMEM; + goto err; + } + + cl->tx_cfg.channels = kzalloc(cells * sizeof(channel), GFP_KERNEL); + if (!cl->tx_cfg.channels) { + err = -ENOMEM; + goto err2; + } + + max_chan = 0; + for (i = 0; i < cells; i++) { + err = of_property_read_u32_index(client, "hsi-channel-ids", i, + &channel.id); + if (err) + goto err3; + + err = of_property_read_string_index(client, "hsi-channel-names", + i, &channel.name); + if (err) + channel.name = NULL; + + if (channel.id > max_chan) + max_chan = channel.id; + + cl->rx_cfg.channels[i] = channel; + cl->tx_cfg.channels[i] = channel; + } + + cl->rx_cfg.num_hw_channels = max_chan + 1; + cl->tx_cfg.num_hw_channels = max_chan + 1; + + cl->device.bus = &hsi_bus_type; + cl->device.parent = &port->device; + cl->device.release = hsi_client_release; + cl->device.of_node = client; + + if (device_register(&cl->device) < 0) { + pr_err("hsi: failed to register client: %s\n", name); + put_device(&cl->device); + } + + return; + +err3: + kfree(cl->tx_cfg.channels); +err2: + kfree(cl->rx_cfg.channels); +err: + kfree(cl); + pr_err("hsi client: missing or incorrect of property: err=%d\n", err); +} + +void hsi_add_clients_from_dt(struct hsi_port *port, struct device_node *clients) +{ + struct device_node *child; + + /* register hsi-char device */ + hsi_new_client(port, &hsi_char_dev_info); + + for_each_available_child_of_node(clients, child) + 
hsi_add_client_from_dt(port, child); +} +EXPORT_SYMBOL_GPL(hsi_add_clients_from_dt); +#endif + +int hsi_remove_client(struct device *dev, void *data __maybe_unused) +{ + device_unregister(dev); + + return 0; +} +EXPORT_SYMBOL_GPL(hsi_remove_client); + +static int hsi_remove_port(struct device *dev, void *data __maybe_unused) +{ + device_for_each_child(dev, NULL, hsi_remove_client); + device_unregister(dev); + + return 0; +} + +static void hsi_controller_release(struct device *dev) +{ + struct hsi_controller *hsi = to_hsi_controller(dev); + + kfree(hsi->port); + kfree(hsi); +} + +static void hsi_port_release(struct device *dev) +{ + kfree(to_hsi_port(dev)); +} + +/** + * hsi_unregister_port - Unregister an HSI port + * @port: The HSI port to unregister + */ +void hsi_port_unregister_clients(struct hsi_port *port) +{ + device_for_each_child(&port->device, NULL, hsi_remove_client); +} +EXPORT_SYMBOL_GPL(hsi_port_unregister_clients); + +/** + * hsi_unregister_controller - Unregister an HSI controller + * @hsi: The HSI controller to register + */ +void hsi_unregister_controller(struct hsi_controller *hsi) +{ + device_for_each_child(&hsi->device, NULL, hsi_remove_port); + device_unregister(&hsi->device); +} +EXPORT_SYMBOL_GPL(hsi_unregister_controller); + +/** + * hsi_register_controller - Register an HSI controller and its ports + * @hsi: The HSI controller to register + * + * Returns -errno on failure, 0 on success. + */ +int hsi_register_controller(struct hsi_controller *hsi) +{ + unsigned int i; + int err; + + err = device_add(&hsi->device); + if (err < 0) + return err; + for (i = 0; i < hsi->num_ports; i++) { + hsi->port[i]->device.parent = &hsi->device; + err = device_add(&hsi->port[i]->device); + if (err < 0) + goto out; + } + /* Populate HSI bus with HSI clients */ + hsi_scan_board_info(hsi); + + return 0; +out: + while (i-- > 0) + device_del(&hsi->port[i]->device); + device_del(&hsi->device); + + return err; +} +EXPORT_SYMBOL_GPL(hsi_register_controller); + +/** + * hsi_register_client_driver - Register an HSI client to the HSI bus + * @drv: HSI client driver to register + * + * Returns -errno on failure, 0 on success. + */ +int hsi_register_client_driver(struct hsi_client_driver *drv) +{ + drv->driver.bus = &hsi_bus_type; + + return driver_register(&drv->driver); +} +EXPORT_SYMBOL_GPL(hsi_register_client_driver); + +static inline int hsi_dummy_msg(struct hsi_msg *msg __maybe_unused) +{ + return 0; +} + +static inline int hsi_dummy_cl(struct hsi_client *cl __maybe_unused) +{ + return 0; +} + +/** + * hsi_put_controller - Free an HSI controller + * + * @hsi: Pointer to the HSI controller to freed + * + * HSI controller drivers should only use this function if they need + * to free their allocated hsi_controller structures before a successful + * call to hsi_register_controller. Other use is not allowed. + */ +void hsi_put_controller(struct hsi_controller *hsi) +{ + unsigned int i; + + if (!hsi) + return; + + for (i = 0; i < hsi->num_ports; i++) + if (hsi->port && hsi->port[i]) + put_device(&hsi->port[i]->device); + put_device(&hsi->device); +} +EXPORT_SYMBOL_GPL(hsi_put_controller); + +/** + * hsi_alloc_controller - Allocate an HSI controller and its ports + * @n_ports: Number of ports on the HSI controller + * @flags: Kernel allocation flags + * + * Return NULL on failure or a pointer to an hsi_controller on success. 
+ */ +struct hsi_controller *hsi_alloc_controller(unsigned int n_ports, gfp_t flags) +{ + struct hsi_controller *hsi; + struct hsi_port **port; + unsigned int i; + + if (!n_ports) + return NULL; + + hsi = kzalloc(sizeof(*hsi), flags); + if (!hsi) + return NULL; + port = kzalloc(sizeof(*port)*n_ports, flags); + if (!port) { + kfree(hsi); + return NULL; + } + hsi->num_ports = n_ports; + hsi->port = port; + hsi->device.release = hsi_controller_release; + device_initialize(&hsi->device); + + for (i = 0; i < n_ports; i++) { + port[i] = kzalloc(sizeof(**port), flags); + if (port[i] == NULL) + goto out; + port[i]->num = i; + port[i]->async = hsi_dummy_msg; + port[i]->setup = hsi_dummy_cl; + port[i]->flush = hsi_dummy_cl; + port[i]->start_tx = hsi_dummy_cl; + port[i]->stop_tx = hsi_dummy_cl; + port[i]->release = hsi_dummy_cl; + mutex_init(&port[i]->lock); + BLOCKING_INIT_NOTIFIER_HEAD(&port[i]->n_head); + dev_set_name(&port[i]->device, "port%d", i); + hsi->port[i]->device.release = hsi_port_release; + device_initialize(&hsi->port[i]->device); + } + + return hsi; +out: + hsi_put_controller(hsi); + + return NULL; +} +EXPORT_SYMBOL_GPL(hsi_alloc_controller); + +/** + * hsi_free_msg - Free an HSI message + * @msg: Pointer to the HSI message + * + * Client is responsible to free the buffers pointed by the scatterlists. + */ +void hsi_free_msg(struct hsi_msg *msg) +{ + if (!msg) + return; + sg_free_table(&msg->sgt); + kfree(msg); +} +EXPORT_SYMBOL_GPL(hsi_free_msg); + +/** + * hsi_alloc_msg - Allocate an HSI message + * @nents: Number of memory entries + * @flags: Kernel allocation flags + * + * nents can be 0. This mainly makes sense for read transfer. + * In that case, HSI drivers will call the complete callback when + * there is data to be read without consuming it. + * + * Return NULL on failure or a pointer to an hsi_msg on success. + */ +struct hsi_msg *hsi_alloc_msg(unsigned int nents, gfp_t flags) +{ + struct hsi_msg *msg; + int err; + + msg = kzalloc(sizeof(*msg), flags); + if (!msg) + return NULL; + + if (!nents) + return msg; + + err = sg_alloc_table(&msg->sgt, nents, flags); + if (unlikely(err)) { + kfree(msg); + msg = NULL; + } + + return msg; +} +EXPORT_SYMBOL_GPL(hsi_alloc_msg); + +/** + * hsi_async - Submit an HSI transfer to the controller + * @cl: HSI client sending the transfer + * @msg: The HSI transfer passed to controller + * + * The HSI message must have the channel, ttype, complete and destructor + * fields set beforehand. If nents > 0 then the client has to initialize + * also the scatterlists to point to the buffers to write to or read from. + * + * HSI controllers relay on pre-allocated buffers from their clients and they + * do not allocate buffers on their own. + * + * Once the HSI message transfer finishes, the HSI controller calls the + * complete callback with the status and actual_len fields of the HSI message + * updated. The complete callback can be called before returning from + * hsi_async. + * + * Returns -errno on failure or 0 on success + */ +int hsi_async(struct hsi_client *cl, struct hsi_msg *msg) +{ + struct hsi_port *port = hsi_get_port(cl); + + if (!hsi_port_claimed(cl)) + return -EACCES; + + WARN_ON_ONCE(!msg->destructor || !msg->complete); + msg->cl = cl; + + return port->async(msg); +} +EXPORT_SYMBOL_GPL(hsi_async); + +/** + * hsi_claim_port - Claim the HSI client's port + * @cl: HSI client that wants to claim its port + * @share: Flag to indicate if the client wants to share the port or not. + * + * Returns -errno on failure, 0 on success. 
+ */ +int hsi_claim_port(struct hsi_client *cl, unsigned int share) +{ + struct hsi_port *port = hsi_get_port(cl); + int err = 0; + + mutex_lock(&port->lock); + if ((port->claimed) && (!port->shared || !share)) { + err = -EBUSY; + goto out; + } + if (!try_module_get(to_hsi_controller(port->device.parent)->owner)) { + err = -ENODEV; + goto out; + } + port->claimed++; + port->shared = !!share; + cl->pclaimed = 1; +out: + mutex_unlock(&port->lock); + + return err; +} +EXPORT_SYMBOL_GPL(hsi_claim_port); + +/** + * hsi_release_port - Release the HSI client's port + * @cl: HSI client which previously claimed its port + */ +void hsi_release_port(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + + mutex_lock(&port->lock); + /* Allow HW driver to do some cleanup */ + port->release(cl); + if (cl->pclaimed) + port->claimed--; + BUG_ON(port->claimed < 0); + cl->pclaimed = 0; + if (!port->claimed) + port->shared = 0; + module_put(to_hsi_controller(port->device.parent)->owner); + mutex_unlock(&port->lock); +} +EXPORT_SYMBOL_GPL(hsi_release_port); + +static int hsi_event_notifier_call(struct notifier_block *nb, + unsigned long event, void *data __maybe_unused) +{ + struct hsi_client *cl = container_of(nb, struct hsi_client, nb); + + (*cl->ehandler)(cl, event); + + return 0; +} + +/** + * hsi_register_port_event - Register a client to receive port events + * @cl: HSI client that wants to receive port events + * @handler: Event handler callback + * + * Clients should register a callback to be able to receive + * events from the ports. Registration should happen after + * claiming the port. + * The handler can be called in interrupt context. + * + * Returns -errno on error, or 0 on success. + */ +int hsi_register_port_event(struct hsi_client *cl, + void (*handler)(struct hsi_client *, unsigned long)) +{ + struct hsi_port *port = hsi_get_port(cl); + + if (!handler || cl->ehandler) + return -EINVAL; + if (!hsi_port_claimed(cl)) + return -EACCES; + cl->ehandler = handler; + cl->nb.notifier_call = hsi_event_notifier_call; + + return blocking_notifier_chain_register(&port->n_head, &cl->nb); +} +EXPORT_SYMBOL_GPL(hsi_register_port_event); + +/** + * hsi_unregister_port_event - Stop receiving port events for a client + * @cl: HSI client that wants to stop receiving port events + * + * Clients should call this function before releasing their associated + * port. + * + * Returns -errno on error, or 0 on success. + */ +int hsi_unregister_port_event(struct hsi_client *cl) +{ + struct hsi_port *port = hsi_get_port(cl); + int err; + + WARN_ON(!hsi_port_claimed(cl)); + + err = blocking_notifier_chain_unregister(&port->n_head, &cl->nb); + if (!err) + cl->ehandler = NULL; + + return err; +} +EXPORT_SYMBOL_GPL(hsi_unregister_port_event); + +/** + * hsi_event - Notifies clients about port events + * @port: Port where the event occurred + * @event: The event type + * + * Clients should not be concerned about wake line behavior. However, due + * to a race condition in HSI HW protocol, clients need to be notified + * about wake line changes, so they can implement a workaround for it. + * + * Events: + * HSI_EVENT_START_RX - Incoming wake line high + * HSI_EVENT_STOP_RX - Incoming wake line down + * + * Returns -errno on error, or 0 on success. 
+ */ +int hsi_event(struct hsi_port *port, unsigned long event) +{ + return blocking_notifier_call_chain(&port->n_head, event, NULL); +} +EXPORT_SYMBOL_GPL(hsi_event); + +/** + * hsi_get_channel_id_by_name - acquire channel id by channel name + * @cl: HSI client, which uses the channel + * @name: name the channel is known under + * + * Clients can call this function to get the hsi channel ids similar to + * requesting IRQs or GPIOs by name. This function assumes the same + * channel configuration is used for RX and TX. + * + * Returns -errno on error or channel id on success. + */ +int hsi_get_channel_id_by_name(struct hsi_client *cl, char *name) +{ + int i; + + if (!cl->rx_cfg.channels) + return -ENOENT; + + for (i = 0; i < cl->rx_cfg.num_channels; i++) + if (!strcmp(cl->rx_cfg.channels[i].name, name)) + return cl->rx_cfg.channels[i].id; + + return -ENXIO; +} +EXPORT_SYMBOL_GPL(hsi_get_channel_id_by_name); + +static int __init hsi_init(void) +{ + return bus_register(&hsi_bus_type); +} +postcore_initcall(hsi_init); + +static void __exit hsi_exit(void) +{ + bus_unregister(&hsi_bus_type); +} +module_exit(hsi_exit); + +MODULE_AUTHOR("Carlos Chinea "); +MODULE_DESCRIPTION("High-speed Synchronous Serial Interface (HSI) framework"); +MODULE_LICENSE("GPL v2"); -- cgit v1.2.3-54-g00ecf
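
A note on the recurring conversion above: the lock-protected "waketest:1" bitfield in struct ssi_protocol is replaced by an atomic flag bit (SSIP_WAKETEST_FLAG in ssi->flags) so that ssi_waketest() is called with ssi->lock dropped; with pm_runtime_irq_safe() removed from the controller probe, the runtime-PM calls reached through it may sleep. The sketch below condenses that idiom using the names from ssi_protocol.c; example_enable_waketest() is a hypothetical helper shown only to illustrate the pattern and is not part of the patch.

#include <linux/bitops.h>
#include <linux/spinlock.h>

/* Illustration of the patch's flag-bit idiom (assumed helper name). */
static void example_enable_waketest(struct ssi_protocol *ssi,
				    struct hsi_client *cl)
{
	/* Atomic test-and-set: only the 0 -> 1 transition enables the test. */
	if (!test_and_set_bit(SSIP_WAKETEST_FLAG, &ssi->flags))
		ssi_waketest(cl, 1);	/* may sleep, so no spinlock held here */

	/* State-machine updates are still done under the spinlock. */
	spin_lock_bh(&ssi->lock);
	ssi->main_state = HANDSHAKE;
	spin_unlock_bh(&ssi->lock);
}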
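
The other pattern repeated across omap_ssi_port.c is the move from pm_runtime_get_sync()/pm_runtime_put_sync(), which required pm_runtime_irq_safe(), to asynchronous references released through autosuspend, with a pm_runtime_active() check standing in for the old synchronous resume. A condensed sketch of that shape follows, using an assumed helper name; the real code paths are ssi_start_pio(), ssi_start_dma() and ssi_async().

#include <linux/pm_runtime.h>

/* Illustration of the patch's runtime-PM idiom (assumed helper name). */
static int example_touch_port_regs(struct omap_ssi_port *omap_port)
{
	pm_runtime_get(omap_port->pdev);	/* async: does not wait for resume */

	if (!pm_runtime_active(omap_port->pdev)) {
		/* Port is not powered; drop the reference and report it. */
		pm_runtime_put_autosuspend(omap_port->pdev);
		return -EREMOTEIO;
	}

	/* ... access SSI port registers here ... */

	/* Restart the 250 ms autosuspend timer set up in ssi_port_probe(). */
	pm_runtime_mark_last_busy(omap_port->pdev);
	pm_runtime_put_autosuspend(omap_port->pdev);
	return 0;
}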