From 57f0f512b273f60d52568b8c6b77e17f5636edc0 Mon Sep 17 00:00:00 2001
From: André Fabian Silva Delgado
Date: Wed, 5 Aug 2015 17:04:01 -0300
Subject: Initial import
---
 drivers/rapidio/Kconfig              |   75 +
 drivers/rapidio/Makefile             |   12 +
 drivers/rapidio/devices/Kconfig      |   10 +
 drivers/rapidio/devices/Makefile     |    7 +
 drivers/rapidio/devices/tsi721.c     | 2518 ++++++++++++++++++++++++++++++++++
 drivers/rapidio/devices/tsi721.h     |  853 ++++++++++++
 drivers/rapidio/devices/tsi721_dma.c |  907 ++++++++++++
 drivers/rapidio/rio-access.c         |  175 +++
 drivers/rapidio/rio-driver.c         |  260 ++++
 drivers/rapidio/rio-scan.c           | 1201 ++++++++++++++++
 drivers/rapidio/rio-sysfs.c          |  383 ++++++
 drivers/rapidio/rio.c                | 1969 ++++++++++++++++++++++++++
 drivers/rapidio/rio.h                |   56 +
 drivers/rapidio/switches/Kconfig     |   24 +
 drivers/rapidio/switches/Makefile    |    8 +
 drivers/rapidio/switches/idt_gen2.c  |  495 +++++++
 drivers/rapidio/switches/idtcps.c    |  203 +++
 drivers/rapidio/switches/tsi568.c    |  199 +++
 drivers/rapidio/switches/tsi57x.c    |  371 +++++
 19 files changed, 9726 insertions(+)
 create mode 100644 drivers/rapidio/Kconfig
 create mode 100644 drivers/rapidio/Makefile
 create mode 100644 drivers/rapidio/devices/Kconfig
 create mode 100644 drivers/rapidio/devices/Makefile
 create mode 100644 drivers/rapidio/devices/tsi721.c
 create mode 100644 drivers/rapidio/devices/tsi721.h
 create mode 100644 drivers/rapidio/devices/tsi721_dma.c
 create mode 100644 drivers/rapidio/rio-access.c
 create mode 100644 drivers/rapidio/rio-driver.c
 create mode 100644 drivers/rapidio/rio-scan.c
 create mode 100644 drivers/rapidio/rio-sysfs.c
 create mode 100644 drivers/rapidio/rio.c
 create mode 100644 drivers/rapidio/rio.h
 create mode 100644 drivers/rapidio/switches/Kconfig
 create mode 100644 drivers/rapidio/switches/Makefile
 create mode 100644 drivers/rapidio/switches/idt_gen2.c
 create mode 100644 drivers/rapidio/switches/idtcps.c
 create mode 100644 drivers/rapidio/switches/tsi568.c
 create mode 100644 drivers/rapidio/switches/tsi57x.c

diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
new file mode 100644
index 000000000..3e3be57e9
--- /dev/null
+++ b/drivers/rapidio/Kconfig
@@ -0,0 +1,75 @@
+#
+# RapidIO configuration
+#
+source "drivers/rapidio/devices/Kconfig"
+
+config RAPIDIO_DISC_TIMEOUT
+	int "Discovery timeout duration (seconds)"
+	depends on RAPIDIO
+	default "30"
+	---help---
+	  Amount of time a discovery node waits for a host to complete
+	  enumeration before giving up.
+
+config RAPIDIO_ENABLE_RX_TX_PORTS
+	bool "Enable RapidIO Input/Output Ports"
+	depends on RAPIDIO
+	---help---
+	  The RapidIO specification describes an Output port transmit enable
+	  and an Input port receive enable; the recommended state for both is
+	  disabled. When this option is set, the RapidIO subsystem enables
+	  all ports for Input/Output so that traffic other than Maintenance
+	  transfers can pass.
+
+config RAPIDIO_DMA_ENGINE
+	bool "DMA Engine support for RapidIO"
+	depends on RAPIDIO
+	select DMADEVICES
+	select DMA_ENGINE
+	help
+	  Say Y here if you want to use the DMA Engine framework for RapidIO
+	  data transfers to/from target RIO devices. RapidIO uses NREAD and
+	  NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
+	  memory and memory on a remote target device. You need a DMA
+	  controller capable of performing data transfers to/from RapidIO.
+
+	  If you are unsure about this, say Y here.
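+
+# Example (illustrative, not part of the upstream file): with the options
+# above, a .config fragment for a Tsi721-based host that wants DMA-backed
+# transfers and a modular enumerator might look like:
+#
+#   CONFIG_RAPIDIO=y
+#   CONFIG_RAPIDIO_TSI721=y
+#   CONFIG_RAPIDIO_DMA_ENGINE=y
+#   CONFIG_RAPIDIO_ENUM_BASIC=m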
+
+config RAPIDIO_DEBUG
+	bool "RapidIO subsystem debug messages"
+	depends on RAPIDIO
+	help
+	  Say Y here if you want the RapidIO subsystem to produce a bunch of
+	  debug messages to the system log. Select this if you are having a
+	  problem with the RapidIO subsystem and want to see more of what is
+	  going on.
+
+	  If you are unsure about this, say N here.
+
+choice
+	prompt "Enumeration method"
+	depends on RAPIDIO
+	default RAPIDIO_ENUM_BASIC
+	help
+	  There are different enumeration and discovery mechanisms offered
+	  for the RapidIO subsystem. You may select a single built-in method
+	  or any number of methods to be built as modules.
+	  Selecting a built-in method disables use of loadable methods.
+
+	  If unsure, select Basic built-in.
+
+config RAPIDIO_ENUM_BASIC
+	tristate "Basic"
+	help
+	  This option includes a basic RapidIO fabric enumeration and
+	  discovery mechanism similar to the one described in the RapidIO
+	  specification, Annex 1.
+
+endchoice
+
+menu "RapidIO Switch drivers"
+	depends on RAPIDIO
+
+source "drivers/rapidio/switches/Kconfig"
+
+endmenu
diff --git a/drivers/rapidio/Makefile b/drivers/rapidio/Makefile
new file mode 100644
index 000000000..6271ada69
--- /dev/null
+++ b/drivers/rapidio/Makefile
@@ -0,0 +1,12 @@
+#
+# Makefile for RapidIO interconnect services
+#
+obj-$(CONFIG_RAPIDIO) += rapidio.o
+rapidio-y := rio.o rio-access.o rio-driver.o rio-sysfs.o
+
+obj-$(CONFIG_RAPIDIO_ENUM_BASIC) += rio-scan.o
+
+obj-$(CONFIG_RAPIDIO) += switches/
+obj-$(CONFIG_RAPIDIO) += devices/
+
+subdir-ccflags-$(CONFIG_RAPIDIO_DEBUG) := -DDEBUG
diff --git a/drivers/rapidio/devices/Kconfig b/drivers/rapidio/devices/Kconfig
new file mode 100644
index 000000000..c4cb08775
--- /dev/null
+++ b/drivers/rapidio/devices/Kconfig
@@ -0,0 +1,10 @@
+#
+# RapidIO master port configuration
+#
+
+config RAPIDIO_TSI721
+	tristate "IDT Tsi721 PCI Express SRIO Controller support"
+	depends on RAPIDIO && PCIEPORTBUS
+	default "n"
+	---help---
+	  Include support for IDT Tsi721 PCI Express Serial RapidIO controller.
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
new file mode 100644
index 000000000..9432c494c
--- /dev/null
+++ b/drivers/rapidio/devices/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for RapidIO devices
+#
+
+obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_mport.o
+tsi721_mport-y := tsi721.o
+tsi721_mport-$(CONFIG_RAPIDIO_DMA_ENGINE) += tsi721_dma.o
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
new file mode 100644
index 000000000..eeca70ddb
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721.c
@@ -0,0 +1,2518 @@
+/*
+ * RapidIO mport driver for Tsi721 PCIExpress-to-SRIO bridge
+ *
+ * Copyright 2011 Integrated Device Technology, Inc.
+ * Alexandre Bounine
+ * Chul Kim
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, write to the Free Software Foundation, Inc., 59
+ * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+
+#include "tsi721.h"
+
+#define DEBUG_PW	/* Inbound Port-Write debugging */
+
+static void tsi721_omsg_handler(struct tsi721_device *priv, int ch);
+static void tsi721_imsg_handler(struct tsi721_device *priv, int ch);
+
+/**
+ * tsi721_lcread - read from local SREP config space
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Location to read the value into
+ *
+ * Generates a local SREP space read. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int tsi721_lcread(struct rio_mport *mport, int index, u32 offset,
+			 int len, u32 *data)
+{
+	struct tsi721_device *priv = mport->priv;
+
+	if (len != sizeof(u32))
+		return -EINVAL; /* only 32-bit access is supported */
+
+	*data = ioread32(priv->regs + offset);
+
+	return 0;
+}
+
+/**
+ * tsi721_lcwrite - write into local SREP config space
+ * @mport: RapidIO master port info
+ * @index: ID of RapidIO interface
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Value to be written
+ *
+ * Generates a local write into SREP configuration space. Returns %0 on
+ * success or %-EINVAL on failure.
+ */
+static int tsi721_lcwrite(struct rio_mport *mport, int index, u32 offset,
+			  int len, u32 data)
+{
+	struct tsi721_device *priv = mport->priv;
+
+	if (len != sizeof(u32))
+		return -EINVAL; /* only 32-bit access is supported */
+
+	iowrite32(data, priv->regs + offset);
+
+	return 0;
+}
+
+/**
+ * tsi721_maint_dma - Helper function to generate RapidIO maintenance
+ *                    transactions using designated Tsi721 DMA channel.
+ * @priv: pointer to tsi721 private data
+ * @sys_size: RapidIO transport system size
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Location to be read from or written into
+ * @do_wr: Operation flag (1 == MAINT_WR)
+ *
+ * Generates a RapidIO maintenance transaction (Read or Write).
+ * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ */
+static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
+			u16 destid, u8 hopcount, u32 offset, int len,
+			u32 *data, int do_wr)
+{
+	void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
+	struct tsi721_dma_desc *bd_ptr;
+	u32 rd_count, swr_ptr, ch_stat;
+	int i, err = 0;
+	u32 op = do_wr ? MAINT_WR : MAINT_RD;
+
+	if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
+		return -EINVAL;
+
+	bd_ptr = priv->mdma.bd_base;
+
+	rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
+
+	/* Initialize DMA descriptor */
+	bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
+	bd_ptr[0].bcount = cpu_to_le32((sys_size << 26) | 0x04);
+	bd_ptr[0].raddr_lo = cpu_to_le32((hopcount << 24) | offset);
+	bd_ptr[0].raddr_hi = 0;
+	if (do_wr)
+		bd_ptr[0].data[0] = cpu_to_be32p(data);
+	else
+		bd_ptr[0].data[0] = 0xffffffff;
+
+	mb();
+
+	/* Start DMA operation */
+	iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
+	ioread32(regs + TSI721_DMAC_DWRCNT);
+	i = 0;
+
+	/* Wait until DMA transfer is finished */
+	while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
+							& TSI721_DMAC_STS_RUN) {
+		udelay(1);
+		if (++i >= 5000000) {
+			dev_dbg(&priv->pdev->dev,
+				"%s : DMA[%d] read timeout ch_status=%x\n",
+				__func__, priv->mdma.ch_id, ch_stat);
+			if (!do_wr)
+				*data = 0xffffffff;
+			err = -EIO;
+			goto err_out;
+		}
+	}
+
+	if (ch_stat & TSI721_DMAC_STS_ABORT) {
+		/* If DMA operation aborted due to error,
+		 * reinitialize DMA channel
+		 */
+		dev_dbg(&priv->pdev->dev, "%s : DMA ABORT ch_stat=%x\n",
+			__func__, ch_stat);
+		dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
+			do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
+		iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
+		iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
+		udelay(10);
+		iowrite32(0, regs + TSI721_DMAC_DWRCNT);
+		udelay(1);
+		if (!do_wr)
+			*data = 0xffffffff;
+		err = -EIO;
+		goto err_out;
+	}
+
+	if (!do_wr)
+		*data = be32_to_cpu(bd_ptr[0].data[0]);
+
+	/*
+	 * Update descriptor status FIFO RD pointer.
+	 * NOTE: Skipping check and clear FIFO entries because we are waiting
+	 * for transfer to be completed.
+	 */
+	swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
+	iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
+err_out:
+
+	return err;
+}
+
+/**
+ * tsi721_cread_dma - Generate a RapidIO maintenance read transaction
+ *                    using Tsi721 BDMA engine.
+ * @mport: RapidIO master port control structure
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Location to be read into
+ *
+ * Generates a RapidIO maintenance read transaction.
+ * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
+ */
+static int tsi721_cread_dma(struct rio_mport *mport, int index, u16 destid,
+			u8 hopcount, u32 offset, int len, u32 *data)
+{
+	struct tsi721_device *priv = mport->priv;
+
+	return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount,
+				offset, len, data, 0);
+}
+
+/**
+ * tsi721_cwrite_dma - Generate a RapidIO maintenance write transaction
+ *                     using Tsi721 BDMA engine
+ * @mport: RapidIO master port control structure
+ * @index: ID of RapidIO interface
+ * @destid: Destination ID of transaction
+ * @hopcount: Number of hops to target device
+ * @offset: Offset into configuration space
+ * @len: Length (in bytes) of the maintenance transaction
+ * @data: Value to be written
+ *
+ * Generates a RapidIO maintenance write transaction.
+ * Returns %0 on success and %-EINVAL or %-EFAULT on failure.
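+ *
+ * Illustrative usage (not part of the original comment): the RIO core
+ * reaches this handler through the generic maintenance accessors added in
+ * drivers/rapidio/rio-access.c by this same patch, e.g.:
+ *
+ *   rio_mport_write_config_32(mport, destid, hopcount, offset, value);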
+ */ +static int tsi721_cwrite_dma(struct rio_mport *mport, int index, u16 destid, + u8 hopcount, u32 offset, int len, u32 data) +{ + struct tsi721_device *priv = mport->priv; + u32 temp = data; + + return tsi721_maint_dma(priv, mport->sys_size, destid, hopcount, + offset, len, &temp, 1); +} + +/** + * tsi721_pw_handler - Tsi721 inbound port-write interrupt handler + * @mport: RapidIO master port structure + * + * Handles inbound port-write interrupts. Copies PW message from an internal + * buffer into PW message FIFO and schedules deferred routine to process + * queued messages. + */ +static int +tsi721_pw_handler(struct rio_mport *mport) +{ + struct tsi721_device *priv = mport->priv; + u32 pw_stat; + u32 pw_buf[TSI721_RIO_PW_MSG_SIZE/sizeof(u32)]; + + + pw_stat = ioread32(priv->regs + TSI721_RIO_PW_RX_STAT); + + if (pw_stat & TSI721_RIO_PW_RX_STAT_PW_VAL) { + pw_buf[0] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(0)); + pw_buf[1] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(1)); + pw_buf[2] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(2)); + pw_buf[3] = ioread32(priv->regs + TSI721_RIO_PW_RX_CAPT(3)); + + /* Queue PW message (if there is room in FIFO), + * otherwise discard it. + */ + spin_lock(&priv->pw_fifo_lock); + if (kfifo_avail(&priv->pw_fifo) >= TSI721_RIO_PW_MSG_SIZE) + kfifo_in(&priv->pw_fifo, pw_buf, + TSI721_RIO_PW_MSG_SIZE); + else + priv->pw_discard_count++; + spin_unlock(&priv->pw_fifo_lock); + } + + /* Clear pending PW interrupts */ + iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL, + priv->regs + TSI721_RIO_PW_RX_STAT); + + schedule_work(&priv->pw_work); + + return 0; +} + +static void tsi721_pw_dpc(struct work_struct *work) +{ + struct tsi721_device *priv = container_of(work, struct tsi721_device, + pw_work); + u32 msg_buffer[RIO_PW_MSG_SIZE/sizeof(u32)]; /* Use full size PW message + buffer for RIO layer */ + + /* + * Process port-write messages + */ + while (kfifo_out_spinlocked(&priv->pw_fifo, (unsigned char *)msg_buffer, + TSI721_RIO_PW_MSG_SIZE, &priv->pw_fifo_lock)) { + /* Process one message */ +#ifdef DEBUG_PW + { + u32 i; + pr_debug("%s : Port-Write Message:", __func__); + for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32); ) { + pr_debug("0x%02x: %08x %08x %08x %08x", i*4, + msg_buffer[i], msg_buffer[i + 1], + msg_buffer[i + 2], msg_buffer[i + 3]); + i += 4; + } + pr_debug("\n"); + } +#endif + /* Pass the port-write message to RIO core for processing */ + rio_inb_pwrite_handler((union rio_pw_msg *)msg_buffer); + } +} + +/** + * tsi721_pw_enable - enable/disable port-write interface init + * @mport: Master port implementing the port write unit + * @enable: 1=enable; 0=disable port-write message handling + */ +static int tsi721_pw_enable(struct rio_mport *mport, int enable) +{ + struct tsi721_device *priv = mport->priv; + u32 rval; + + rval = ioread32(priv->regs + TSI721_RIO_EM_INT_ENABLE); + + if (enable) + rval |= TSI721_RIO_EM_INT_ENABLE_PW_RX; + else + rval &= ~TSI721_RIO_EM_INT_ENABLE_PW_RX; + + /* Clear pending PW interrupts */ + iowrite32(TSI721_RIO_PW_RX_STAT_PW_DISC | TSI721_RIO_PW_RX_STAT_PW_VAL, + priv->regs + TSI721_RIO_PW_RX_STAT); + /* Update enable bits */ + iowrite32(rval, priv->regs + TSI721_RIO_EM_INT_ENABLE); + + return 0; +} + +/** + * tsi721_dsend - Send a RapidIO doorbell + * @mport: RapidIO master port info + * @index: ID of RapidIO interface + * @destid: Destination ID of target device + * @data: 16-bit info field of RapidIO doorbell + * + * Sends a RapidIO doorbell message. Always returns %0. 
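+ *
+ * For example: in a small (8-bit destID) system, mport->sys_size is 0, so
+ * a doorbell to destID 0x05 becomes a 16-bit big-endian write at
+ * odb_base + ((RIO_TT_CODE_8 << 18) | (0x05 << 2)).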
+ */ +static int tsi721_dsend(struct rio_mport *mport, int index, + u16 destid, u16 data) +{ + struct tsi721_device *priv = mport->priv; + u32 offset; + + offset = (((mport->sys_size) ? RIO_TT_CODE_16 : RIO_TT_CODE_8) << 18) | + (destid << 2); + + dev_dbg(&priv->pdev->dev, + "Send Doorbell 0x%04x to destID 0x%x\n", data, destid); + iowrite16be(data, priv->odb_base + offset); + + return 0; +} + +/** + * tsi721_dbell_handler - Tsi721 doorbell interrupt handler + * @mport: RapidIO master port structure + * + * Handles inbound doorbell interrupts. Copies doorbell entry from an internal + * buffer into DB message FIFO and schedules deferred routine to process + * queued DBs. + */ +static int +tsi721_dbell_handler(struct rio_mport *mport) +{ + struct tsi721_device *priv = mport->priv; + u32 regval; + + /* Disable IDB interrupts */ + regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + regval &= ~TSI721_SR_CHINT_IDBQRCV; + iowrite32(regval, + priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + + schedule_work(&priv->idb_work); + + return 0; +} + +static void tsi721_db_dpc(struct work_struct *work) +{ + struct tsi721_device *priv = container_of(work, struct tsi721_device, + idb_work); + struct rio_mport *mport; + struct rio_dbell *dbell; + int found = 0; + u32 wr_ptr, rd_ptr; + u64 *idb_entry; + u32 regval; + union { + u64 msg; + u8 bytes[8]; + } idb; + + /* + * Process queued inbound doorbells + */ + mport = priv->mport; + + wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; + rd_ptr = ioread32(priv->regs + TSI721_IDQ_RP(IDB_QUEUE)) % IDB_QSIZE; + + while (wr_ptr != rd_ptr) { + idb_entry = (u64 *)(priv->idb_base + + (TSI721_IDB_ENTRY_SIZE * rd_ptr)); + rd_ptr++; + rd_ptr %= IDB_QSIZE; + idb.msg = *idb_entry; + *idb_entry = 0; + + /* Process one doorbell */ + list_for_each_entry(dbell, &mport->dbells, node) { + if ((dbell->res->start <= DBELL_INF(idb.bytes)) && + (dbell->res->end >= DBELL_INF(idb.bytes))) { + found = 1; + break; + } + } + + if (found) { + dbell->dinb(mport, dbell->dev_id, DBELL_SID(idb.bytes), + DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); + } else { + dev_dbg(&priv->pdev->dev, + "spurious inb doorbell, sid %2.2x tid %2.2x" + " info %4.4x\n", DBELL_SID(idb.bytes), + DBELL_TID(idb.bytes), DBELL_INF(idb.bytes)); + } + + wr_ptr = ioread32(priv->regs + + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; + } + + iowrite32(rd_ptr & (IDB_QSIZE - 1), + priv->regs + TSI721_IDQ_RP(IDB_QUEUE)); + + /* Re-enable IDB interrupts */ + regval = ioread32(priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + regval |= TSI721_SR_CHINT_IDBQRCV; + iowrite32(regval, + priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + + wr_ptr = ioread32(priv->regs + TSI721_IDQ_WP(IDB_QUEUE)) % IDB_QSIZE; + if (wr_ptr != rd_ptr) + schedule_work(&priv->idb_work); +} + +/** + * tsi721_irqhandler - Tsi721 interrupt handler + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (mport structure) + * + * Handles Tsi721 interrupts signaled using MSI and INTA. Checks reported + * interrupt events and calls an event-specific handler(s). 
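+ *
+ * Note: as implemented below, events are serviced in a fixed order:
+ * SR2PC inbound doorbells, then inbound/outbound messaging channels,
+ * then SRIO MAC port-writes and, when CONFIG_RAPIDIO_DMA_ENGINE is
+ * enabled, BDMA channels.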
+ */ +static irqreturn_t tsi721_irqhandler(int irq, void *ptr) +{ + struct rio_mport *mport = (struct rio_mport *)ptr; + struct tsi721_device *priv = mport->priv; + u32 dev_int; + u32 dev_ch_int; + u32 intval; + u32 ch_inte; + + /* For MSI mode disable all device-level interrupts */ + if (priv->flags & TSI721_USING_MSI) + iowrite32(0, priv->regs + TSI721_DEV_INTE); + + dev_int = ioread32(priv->regs + TSI721_DEV_INT); + if (!dev_int) + return IRQ_NONE; + + dev_ch_int = ioread32(priv->regs + TSI721_DEV_CHAN_INT); + + if (dev_int & TSI721_DEV_INT_SR2PC_CH) { + /* Service SR2PC Channel interrupts */ + if (dev_ch_int & TSI721_INT_SR2PC_CHAN(IDB_QUEUE)) { + /* Service Inbound Doorbell interrupt */ + intval = ioread32(priv->regs + + TSI721_SR_CHINT(IDB_QUEUE)); + if (intval & TSI721_SR_CHINT_IDBQRCV) + tsi721_dbell_handler(mport); + else + dev_info(&priv->pdev->dev, + "Unsupported SR_CH_INT %x\n", intval); + + /* Clear interrupts */ + iowrite32(intval, + priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + } + } + + if (dev_int & TSI721_DEV_INT_SMSG_CH) { + int ch; + + /* + * Service channel interrupts from Messaging Engine + */ + + if (dev_ch_int & TSI721_INT_IMSG_CHAN_M) { /* Inbound Msg */ + /* Disable signaled OB MSG Channel interrupts */ + ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + ch_inte &= ~(dev_ch_int & TSI721_INT_IMSG_CHAN_M); + iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); + + /* + * Process Inbound Message interrupt for each MBOX + */ + for (ch = 4; ch < RIO_MAX_MBOX + 4; ch++) { + if (!(dev_ch_int & TSI721_INT_IMSG_CHAN(ch))) + continue; + tsi721_imsg_handler(priv, ch); + } + } + + if (dev_ch_int & TSI721_INT_OMSG_CHAN_M) { /* Outbound Msg */ + /* Disable signaled OB MSG Channel interrupts */ + ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + ch_inte &= ~(dev_ch_int & TSI721_INT_OMSG_CHAN_M); + iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); + + /* + * Process Outbound Message interrupts for each MBOX + */ + + for (ch = 0; ch < RIO_MAX_MBOX; ch++) { + if (!(dev_ch_int & TSI721_INT_OMSG_CHAN(ch))) + continue; + tsi721_omsg_handler(priv, ch); + } + } + } + + if (dev_int & TSI721_DEV_INT_SRIO) { + /* Service SRIO MAC interrupts */ + intval = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); + if (intval & TSI721_RIO_EM_INT_STAT_PW_RX) + tsi721_pw_handler(mport); + } + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + if (dev_int & TSI721_DEV_INT_BDMA_CH) { + int ch; + + if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) { + dev_dbg(&priv->pdev->dev, + "IRQ from DMA channel 0x%08x\n", dev_ch_int); + + for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) { + if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch))) + continue; + tsi721_bdma_handler(&priv->bdma[ch]); + } + } + } +#endif + + /* For MSI mode re-enable device-level interrupts */ + if (priv->flags & TSI721_USING_MSI) { + dev_int = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | + TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; + iowrite32(dev_int, priv->regs + TSI721_DEV_INTE); + } + + return IRQ_HANDLED; +} + +static void tsi721_interrupts_init(struct tsi721_device *priv) +{ + u32 intr; + + /* Enable IDB interrupts */ + iowrite32(TSI721_SR_CHINT_ALL, + priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + iowrite32(TSI721_SR_CHINT_IDBQRCV, + priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); + + /* Enable SRIO MAC interrupts */ + iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT, + priv->regs + TSI721_RIO_EM_DEV_INT_EN); + + /* Enable interrupts from channels in use */ +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + intr = 
TSI721_INT_SR2PC_CHAN(IDB_QUEUE) | + (TSI721_INT_BDMA_CHAN_M & + ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT)); +#else + intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE); +#endif + iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE); + + if (priv->flags & TSI721_USING_MSIX) + intr = TSI721_DEV_INT_SRIO; + else + intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | + TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH; + + iowrite32(intr, priv->regs + TSI721_DEV_INTE); + ioread32(priv->regs + TSI721_DEV_INTE); +} + +#ifdef CONFIG_PCI_MSI +/** + * tsi721_omsg_msix - MSI-X interrupt handler for outbound messaging + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (mport structure) + * + * Handles outbound messaging interrupts signaled using MSI-X. + */ +static irqreturn_t tsi721_omsg_msix(int irq, void *ptr) +{ + struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; + int mbox; + + mbox = (irq - priv->msix[TSI721_VECT_OMB0_DONE].vector) % RIO_MAX_MBOX; + tsi721_omsg_handler(priv, mbox); + return IRQ_HANDLED; +} + +/** + * tsi721_imsg_msix - MSI-X interrupt handler for inbound messaging + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (mport structure) + * + * Handles inbound messaging interrupts signaled using MSI-X. + */ +static irqreturn_t tsi721_imsg_msix(int irq, void *ptr) +{ + struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; + int mbox; + + mbox = (irq - priv->msix[TSI721_VECT_IMB0_RCV].vector) % RIO_MAX_MBOX; + tsi721_imsg_handler(priv, mbox + 4); + return IRQ_HANDLED; +} + +/** + * tsi721_srio_msix - Tsi721 MSI-X SRIO MAC interrupt handler + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (mport structure) + * + * Handles Tsi721 interrupts from SRIO MAC. + */ +static irqreturn_t tsi721_srio_msix(int irq, void *ptr) +{ + struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; + u32 srio_int; + + /* Service SRIO MAC interrupts */ + srio_int = ioread32(priv->regs + TSI721_RIO_EM_INT_STAT); + if (srio_int & TSI721_RIO_EM_INT_STAT_PW_RX) + tsi721_pw_handler((struct rio_mport *)ptr); + + return IRQ_HANDLED; +} + +/** + * tsi721_sr2pc_ch_msix - Tsi721 MSI-X SR2PC Channel interrupt handler + * @irq: Linux interrupt number + * @ptr: Pointer to interrupt-specific data (mport structure) + * + * Handles Tsi721 interrupts from SR2PC Channel. + * NOTE: At this moment services only one SR2PC channel associated with inbound + * doorbells. + */ +static irqreturn_t tsi721_sr2pc_ch_msix(int irq, void *ptr) +{ + struct tsi721_device *priv = ((struct rio_mport *)ptr)->priv; + u32 sr_ch_int; + + /* Service Inbound DB interrupt from SR2PC channel */ + sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + if (sr_ch_int & TSI721_SR_CHINT_IDBQRCV) + tsi721_dbell_handler((struct rio_mport *)ptr); + + /* Clear interrupts */ + iowrite32(sr_ch_int, priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + /* Read back to ensure that interrupt was cleared */ + sr_ch_int = ioread32(priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); + + return IRQ_HANDLED; +} + +/** + * tsi721_request_msix - register interrupt service for MSI-X mode. + * @mport: RapidIO master port structure + * + * Registers MSI-X interrupt service routines for interrupts that are active + * immediately after mport initialization. Messaging interrupt service routines + * should be registered during corresponding open requests. 
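+ *
+ * Note: only the inbound doorbell (TSI721_VECT_IDB) and port-write
+ * (TSI721_VECT_PWRX) vectors are requested below; the messaging and DMA
+ * vectors are requested from the corresponding open routines.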
+ */
+static int tsi721_request_msix(struct rio_mport *mport)
+{
+	struct tsi721_device *priv = mport->priv;
+	int err = 0;
+
+	err = request_irq(priv->msix[TSI721_VECT_IDB].vector,
+			tsi721_sr2pc_ch_msix, 0,
+			priv->msix[TSI721_VECT_IDB].irq_name, (void *)mport);
+	if (err)
+		goto out;
+
+	err = request_irq(priv->msix[TSI721_VECT_PWRX].vector,
+			tsi721_srio_msix, 0,
+			priv->msix[TSI721_VECT_PWRX].irq_name, (void *)mport);
+	if (err)
+		free_irq(
+			priv->msix[TSI721_VECT_IDB].vector,
+			(void *)mport);
+out:
+	return err;
+}
+
+/**
+ * tsi721_enable_msix - Attempts to enable MSI-X support for Tsi721.
+ * @priv: pointer to tsi721 private data
+ *
+ * Configures MSI-X support for Tsi721. Supports only an exact number
+ * of requested vectors.
+ */
+static int tsi721_enable_msix(struct tsi721_device *priv)
+{
+	struct msix_entry entries[TSI721_VECT_MAX];
+	int err;
+	int i;
+
+	entries[TSI721_VECT_IDB].entry = TSI721_MSIX_SR2PC_IDBQ_RCV(IDB_QUEUE);
+	entries[TSI721_VECT_PWRX].entry = TSI721_MSIX_SRIO_MAC_INT;
+
+	/*
+	 * Initialize MSI-X entries for Messaging Engine:
+	 * this driver supports four RIO mailboxes (inbound and outbound)
+	 * NOTE: Inbound message MBOX 0...3 use IB channels 4...7. Therefore
+	 * offset +4 is added to IB MBOX number.
+	 */
+	for (i = 0; i < RIO_MAX_MBOX; i++) {
+		entries[TSI721_VECT_IMB0_RCV + i].entry =
+					TSI721_MSIX_IMSG_DQ_RCV(i + 4);
+		entries[TSI721_VECT_IMB0_INT + i].entry =
+					TSI721_MSIX_IMSG_INT(i + 4);
+		entries[TSI721_VECT_OMB0_DONE + i].entry =
+					TSI721_MSIX_OMSG_DONE(i);
+		entries[TSI721_VECT_OMB0_INT + i].entry =
+					TSI721_MSIX_OMSG_INT(i);
+	}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	/*
+	 * Initialize MSI-X entries for Block DMA Engine:
+	 * this driver supports TSI721_DMA_CHNUM DMA channels
+	 * (one is reserved for SRIO maintenance transactions)
+	 */
+	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+		entries[TSI721_VECT_DMA0_DONE + i].entry =
+					TSI721_MSIX_DMACH_DONE(i);
+		entries[TSI721_VECT_DMA0_INT + i].entry =
+					TSI721_MSIX_DMACH_INT(i);
+	}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+	err = pci_enable_msix_exact(priv->pdev, entries, ARRAY_SIZE(entries));
+	if (err) {
+		dev_err(&priv->pdev->dev,
+			"Failed to enable MSI-X (err=%d)\n", err);
+		return err;
+	}
+
+	/*
+	 * Copy MSI-X vector information into tsi721 private structure
+	 */
+	priv->msix[TSI721_VECT_IDB].vector = entries[TSI721_VECT_IDB].vector;
+	snprintf(priv->msix[TSI721_VECT_IDB].irq_name, IRQ_DEVICE_NAME_MAX,
+		 DRV_NAME "-idb@pci:%s", pci_name(priv->pdev));
+	priv->msix[TSI721_VECT_PWRX].vector = entries[TSI721_VECT_PWRX].vector;
+	snprintf(priv->msix[TSI721_VECT_PWRX].irq_name, IRQ_DEVICE_NAME_MAX,
+		 DRV_NAME "-pwrx@pci:%s", pci_name(priv->pdev));
+
+	for (i = 0; i < RIO_MAX_MBOX; i++) {
+		priv->msix[TSI721_VECT_IMB0_RCV + i].vector =
+				entries[TSI721_VECT_IMB0_RCV + i].vector;
+		snprintf(priv->msix[TSI721_VECT_IMB0_RCV + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbr%d@pci:%s",
+			 i, pci_name(priv->pdev));
+
+		priv->msix[TSI721_VECT_IMB0_INT + i].vector =
+				entries[TSI721_VECT_IMB0_INT + i].vector;
+		snprintf(priv->msix[TSI721_VECT_IMB0_INT + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-imbi%d@pci:%s",
+			 i, pci_name(priv->pdev));
+
+		priv->msix[TSI721_VECT_OMB0_DONE + i].vector =
+				entries[TSI721_VECT_OMB0_DONE + i].vector;
+		snprintf(priv->msix[TSI721_VECT_OMB0_DONE + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombd%d@pci:%s",
+			 i, pci_name(priv->pdev));
+
+		priv->msix[TSI721_VECT_OMB0_INT + i].vector =
+				entries[TSI721_VECT_OMB0_INT + i].vector;
+		snprintf(priv->msix[TSI721_VECT_OMB0_INT + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-ombi%d@pci:%s",
+			 i, pci_name(priv->pdev));
+	}
+
+#ifdef CONFIG_RAPIDIO_DMA_ENGINE
+	for (i = 0; i < TSI721_DMA_CHNUM; i++) {
+		priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
+				entries[TSI721_VECT_DMA0_DONE + i].vector;
+		snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
+			 i, pci_name(priv->pdev));
+
+		priv->msix[TSI721_VECT_DMA0_INT + i].vector =
+				entries[TSI721_VECT_DMA0_INT + i].vector;
+		snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
+			 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
+			 i, pci_name(priv->pdev));
+	}
+#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
+
+	return 0;
+}
+#endif /* CONFIG_PCI_MSI */
+
+static int tsi721_request_irq(struct rio_mport *mport)
+{
+	struct tsi721_device *priv = mport->priv;
+	int err;
+
+#ifdef CONFIG_PCI_MSI
+	if (priv->flags & TSI721_USING_MSIX)
+		err = tsi721_request_msix(mport);
+	else
+#endif
+		err = request_irq(priv->pdev->irq, tsi721_irqhandler,
+			  (priv->flags & TSI721_USING_MSI) ? 0 : IRQF_SHARED,
+			  DRV_NAME, (void *)mport);
+
+	if (err)
+		dev_err(&priv->pdev->dev,
+			"Unable to allocate interrupt, Error: %d\n", err);
+
+	return err;
+}
+
+/**
+ * tsi721_init_pc2sr_mapping - initializes outbound (PCIe->SRIO)
+ *                             translation regions.
+ * @priv: pointer to tsi721 private data
+ *
+ * Disables SREP translation regions.
+ */
+static void tsi721_init_pc2sr_mapping(struct tsi721_device *priv)
+{
+	int i;
+
+	/* Disable all PC2SR translation windows */
+	for (i = 0; i < TSI721_OBWIN_NUM; i++)
+		iowrite32(0, priv->regs + TSI721_OBWINLB(i));
+}
+
+/**
+ * tsi721_rio_map_inb_mem -- Map an inbound memory region.
+ * @mport: RapidIO master port
+ * @lstart: Local memory space start address.
+ * @rstart: RapidIO space start address.
+ * @size: The mapping region size.
+ * @flags: Flags for mapping. 0 for using default flags.
+ *
+ * Return: 0 -- Success.
+ *
+ * This function will create the inbound mapping
+ * from rstart to lstart.
+ */
+static int tsi721_rio_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
+		u64 rstart, u32 size, u32 flags)
+{
+	struct tsi721_device *priv = mport->priv;
+	int i;
+	u32 regval;
+
+	if (!is_power_of_2(size) || size < 0x1000 ||
+	    ((u64)lstart & (size - 1)) || (rstart & (size - 1)))
+		return -EINVAL;
+
+	/* Search for free inbound translation window */
+	for (i = 0; i < TSI721_IBWIN_NUM; i++) {
+		regval = ioread32(priv->regs + TSI721_IBWIN_LB(i));
+		if (!(regval & TSI721_IBWIN_LB_WEN))
+			break;
+	}
+
+	if (i >= TSI721_IBWIN_NUM) {
+		dev_err(&priv->pdev->dev,
+			"Unable to find free inbound window\n");
+		return -EBUSY;
+	}
+
+	iowrite32(TSI721_IBWIN_SIZE(size) << 8,
+			priv->regs + TSI721_IBWIN_SZ(i));
+
+	iowrite32(((u64)lstart >> 32), priv->regs + TSI721_IBWIN_TUA(i));
+	iowrite32(((u64)lstart & TSI721_IBWIN_TLA_ADD),
+		  priv->regs + TSI721_IBWIN_TLA(i));
+
+	iowrite32(rstart >> 32, priv->regs + TSI721_IBWIN_UB(i));
+	iowrite32((rstart & TSI721_IBWIN_LB_BA) | TSI721_IBWIN_LB_WEN,
+		priv->regs + TSI721_IBWIN_LB(i));
+	dev_dbg(&priv->pdev->dev,
+		"Configured IBWIN%d mapping (RIO_0x%llx -> PCIe_0x%llx)\n",
+		i, rstart, (unsigned long long)lstart);
+
+	return 0;
+}
+
+/**
+ * tsi721_rio_unmap_inb_mem -- Unmap an inbound memory region.
+ * @mport: RapidIO master port
+ * @lstart: Local memory space start address.
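+ *
+ * Illustrative pairing (assumption): a region mapped with
+ * rio_map_inb_region() is released with rio_unmap_inb_region(), which
+ * dispatches here through the mport operations table.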
+ */ +static void tsi721_rio_unmap_inb_mem(struct rio_mport *mport, + dma_addr_t lstart) +{ + struct tsi721_device *priv = mport->priv; + int i; + u64 addr; + u32 regval; + + /* Search for matching active inbound translation window */ + for (i = 0; i < TSI721_IBWIN_NUM; i++) { + regval = ioread32(priv->regs + TSI721_IBWIN_LB(i)); + if (regval & TSI721_IBWIN_LB_WEN) { + regval = ioread32(priv->regs + TSI721_IBWIN_TUA(i)); + addr = (u64)regval << 32; + regval = ioread32(priv->regs + TSI721_IBWIN_TLA(i)); + addr |= regval & TSI721_IBWIN_TLA_ADD; + + if (addr == (u64)lstart) { + iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); + break; + } + } + } +} + +/** + * tsi721_init_sr2pc_mapping - initializes inbound (SRIO->PCIe) + * translation regions. + * @priv: pointer to tsi721 private data + * + * Disables inbound windows. + */ +static void tsi721_init_sr2pc_mapping(struct tsi721_device *priv) +{ + int i; + + /* Disable all SR2PC inbound windows */ + for (i = 0; i < TSI721_IBWIN_NUM; i++) + iowrite32(0, priv->regs + TSI721_IBWIN_LB(i)); +} + +/** + * tsi721_port_write_init - Inbound port write interface init + * @priv: pointer to tsi721 private data + * + * Initializes inbound port write handler. + * Returns %0 on success or %-ENOMEM on failure. + */ +static int tsi721_port_write_init(struct tsi721_device *priv) +{ + priv->pw_discard_count = 0; + INIT_WORK(&priv->pw_work, tsi721_pw_dpc); + spin_lock_init(&priv->pw_fifo_lock); + if (kfifo_alloc(&priv->pw_fifo, + TSI721_RIO_PW_MSG_SIZE * 32, GFP_KERNEL)) { + dev_err(&priv->pdev->dev, "PW FIFO allocation failed\n"); + return -ENOMEM; + } + + /* Use reliable port-write capture mode */ + iowrite32(TSI721_RIO_PW_CTL_PWC_REL, priv->regs + TSI721_RIO_PW_CTL); + return 0; +} + +static int tsi721_doorbell_init(struct tsi721_device *priv) +{ + /* Outbound Doorbells do not require any setup. + * Tsi721 uses dedicated PCI BAR1 to generate doorbells. + * That BAR1 was mapped during the probe routine. + */ + + /* Initialize Inbound Doorbell processing DPC and queue */ + priv->db_discard_count = 0; + INIT_WORK(&priv->idb_work, tsi721_db_dpc); + + /* Allocate buffer for inbound doorbells queue */ + priv->idb_base = dma_zalloc_coherent(&priv->pdev->dev, + IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, + &priv->idb_dma, GFP_KERNEL); + if (!priv->idb_base) + return -ENOMEM; + + dev_dbg(&priv->pdev->dev, "Allocated IDB buffer @ %p (phys = %llx)\n", + priv->idb_base, (unsigned long long)priv->idb_dma); + + iowrite32(TSI721_IDQ_SIZE_VAL(IDB_QSIZE), + priv->regs + TSI721_IDQ_SIZE(IDB_QUEUE)); + iowrite32(((u64)priv->idb_dma >> 32), + priv->regs + TSI721_IDQ_BASEU(IDB_QUEUE)); + iowrite32(((u64)priv->idb_dma & TSI721_IDQ_BASEL_ADDR), + priv->regs + TSI721_IDQ_BASEL(IDB_QUEUE)); + /* Enable accepting all inbound doorbells */ + iowrite32(0, priv->regs + TSI721_IDQ_MASK(IDB_QUEUE)); + + iowrite32(TSI721_IDQ_INIT, priv->regs + TSI721_IDQ_CTL(IDB_QUEUE)); + + iowrite32(0, priv->regs + TSI721_IDQ_RP(IDB_QUEUE)); + + return 0; +} + +static void tsi721_doorbell_free(struct tsi721_device *priv) +{ + if (priv->idb_base == NULL) + return; + + /* Free buffer allocated for inbound doorbell queue */ + dma_free_coherent(&priv->pdev->dev, IDB_QSIZE * TSI721_IDB_ENTRY_SIZE, + priv->idb_base, priv->idb_dma); + priv->idb_base = NULL; +} + +/** + * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel. 
+ * @priv: pointer to tsi721 private data + * + * Initialize BDMA channel allocated for RapidIO maintenance read/write + * request generation + * Returns %0 on success or %-ENOMEM on failure. + */ +static int tsi721_bdma_maint_init(struct tsi721_device *priv) +{ + struct tsi721_dma_desc *bd_ptr; + u64 *sts_ptr; + dma_addr_t bd_phys, sts_phys; + int sts_size; + int bd_num = 2; + void __iomem *regs; + + dev_dbg(&priv->pdev->dev, + "Init Block DMA Engine for Maintenance requests, CH%d\n", + TSI721_DMACH_MAINT); + + /* + * Initialize DMA channel for maintenance requests + */ + + priv->mdma.ch_id = TSI721_DMACH_MAINT; + regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT); + + /* Allocate space for DMA descriptors */ + bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, + bd_num * sizeof(struct tsi721_dma_desc), + &bd_phys, GFP_KERNEL); + if (!bd_ptr) + return -ENOMEM; + + priv->mdma.bd_num = bd_num; + priv->mdma.bd_phys = bd_phys; + priv->mdma.bd_base = bd_ptr; + + dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", + bd_ptr, (unsigned long long)bd_phys); + + /* Allocate space for descriptor status FIFO */ + sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ? + bd_num : TSI721_DMA_MINSTSSZ; + sts_size = roundup_pow_of_two(sts_size); + sts_ptr = dma_zalloc_coherent(&priv->pdev->dev, + sts_size * sizeof(struct tsi721_dma_sts), + &sts_phys, GFP_KERNEL); + if (!sts_ptr) { + /* Free space allocated for DMA descriptors */ + dma_free_coherent(&priv->pdev->dev, + bd_num * sizeof(struct tsi721_dma_desc), + bd_ptr, bd_phys); + priv->mdma.bd_base = NULL; + return -ENOMEM; + } + + priv->mdma.sts_phys = sts_phys; + priv->mdma.sts_base = sts_ptr; + priv->mdma.sts_size = sts_size; + + dev_dbg(&priv->pdev->dev, + "desc status FIFO @ %p (phys = %llx) size=0x%x\n", + sts_ptr, (unsigned long long)sts_phys, sts_size); + + /* Initialize DMA descriptors ring */ + bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29); + bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys & + TSI721_DMAC_DPTRL_MASK); + bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); + + /* Setup DMA descriptor pointers */ + iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH); + iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), + regs + TSI721_DMAC_DPTRL); + + /* Setup descriptor status FIFO */ + iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH); + iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), + regs + TSI721_DMAC_DSBL); + iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), + regs + TSI721_DMAC_DSSZ); + + /* Clear interrupt bits */ + iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT); + + ioread32(regs + TSI721_DMAC_INT); + + /* Toggle DMA channel initialization */ + iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); + ioread32(regs + TSI721_DMAC_CTL); + udelay(10); + + return 0; +} + +static int tsi721_bdma_maint_free(struct tsi721_device *priv) +{ + u32 ch_stat; + struct tsi721_bdma_maint *mdma = &priv->mdma; + void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id); + + if (mdma->bd_base == NULL) + return 0; + + /* Check if DMA channel still running */ + ch_stat = ioread32(regs + TSI721_DMAC_STS); + if (ch_stat & TSI721_DMAC_STS_RUN) + return -EFAULT; + + /* Put DMA channel into init state */ + iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL); + + /* Free space allocated for DMA descriptors */ + dma_free_coherent(&priv->pdev->dev, + mdma->bd_num * sizeof(struct tsi721_dma_desc), + mdma->bd_base, mdma->bd_phys); + mdma->bd_base = NULL; + + /* Free space allocated for status FIFO */ + 
dma_free_coherent(&priv->pdev->dev, + mdma->sts_size * sizeof(struct tsi721_dma_sts), + mdma->sts_base, mdma->sts_phys); + mdma->sts_base = NULL; + return 0; +} + +/* Enable Inbound Messaging Interrupts */ +static void +tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch, + u32 inte_mask) +{ + u32 rval; + + if (!inte_mask) + return; + + /* Clear pending Inbound Messaging interrupts */ + iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch)); + + /* Enable Inbound Messaging interrupts */ + rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch)); + iowrite32(rval | inte_mask, priv->regs + TSI721_IBDMAC_INTE(ch)); + + if (priv->flags & TSI721_USING_MSIX) + return; /* Finished if we are in MSI-X mode */ + + /* + * For MSI and INTA interrupt signalling we need to enable next levels + */ + + /* Enable Device Channel Interrupt */ + rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + iowrite32(rval | TSI721_INT_IMSG_CHAN(ch), + priv->regs + TSI721_DEV_CHAN_INTE); +} + +/* Disable Inbound Messaging Interrupts */ +static void +tsi721_imsg_interrupt_disable(struct tsi721_device *priv, int ch, + u32 inte_mask) +{ + u32 rval; + + if (!inte_mask) + return; + + /* Clear pending Inbound Messaging interrupts */ + iowrite32(inte_mask, priv->regs + TSI721_IBDMAC_INT(ch)); + + /* Disable Inbound Messaging interrupts */ + rval = ioread32(priv->regs + TSI721_IBDMAC_INTE(ch)); + rval &= ~inte_mask; + iowrite32(rval, priv->regs + TSI721_IBDMAC_INTE(ch)); + + if (priv->flags & TSI721_USING_MSIX) + return; /* Finished if we are in MSI-X mode */ + + /* + * For MSI and INTA interrupt signalling we need to disable next levels + */ + + /* Disable Device Channel Interrupt */ + rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + rval &= ~TSI721_INT_IMSG_CHAN(ch); + iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE); +} + +/* Enable Outbound Messaging interrupts */ +static void +tsi721_omsg_interrupt_enable(struct tsi721_device *priv, int ch, + u32 inte_mask) +{ + u32 rval; + + if (!inte_mask) + return; + + /* Clear pending Outbound Messaging interrupts */ + iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch)); + + /* Enable Outbound Messaging channel interrupts */ + rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch)); + iowrite32(rval | inte_mask, priv->regs + TSI721_OBDMAC_INTE(ch)); + + if (priv->flags & TSI721_USING_MSIX) + return; /* Finished if we are in MSI-X mode */ + + /* + * For MSI and INTA interrupt signalling we need to enable next levels + */ + + /* Enable Device Channel Interrupt */ + rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + iowrite32(rval | TSI721_INT_OMSG_CHAN(ch), + priv->regs + TSI721_DEV_CHAN_INTE); +} + +/* Disable Outbound Messaging interrupts */ +static void +tsi721_omsg_interrupt_disable(struct tsi721_device *priv, int ch, + u32 inte_mask) +{ + u32 rval; + + if (!inte_mask) + return; + + /* Clear pending Outbound Messaging interrupts */ + iowrite32(inte_mask, priv->regs + TSI721_OBDMAC_INT(ch)); + + /* Disable Outbound Messaging interrupts */ + rval = ioread32(priv->regs + TSI721_OBDMAC_INTE(ch)); + rval &= ~inte_mask; + iowrite32(rval, priv->regs + TSI721_OBDMAC_INTE(ch)); + + if (priv->flags & TSI721_USING_MSIX) + return; /* Finished if we are in MSI-X mode */ + + /* + * For MSI and INTA interrupt signalling we need to disable next levels + */ + + /* Disable Device Channel Interrupt */ + rval = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + rval &= ~TSI721_INT_OMSG_CHAN(ch); + iowrite32(rval, priv->regs + TSI721_DEV_CHAN_INTE); +} + +/** + * 
tsi721_add_outb_message - Add message to the Tsi721 outbound message queue
+ * @mport: Master port with outbound message queue
+ * @rdev: Target of outbound message
+ * @mbox: Outbound mailbox
+ * @buffer: Message to add to outbound queue
+ * @len: Length of message
+ */
+static int
+tsi721_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
+			void *buffer, size_t len)
+{
+	struct tsi721_device *priv = mport->priv;
+	struct tsi721_omsg_desc *desc;
+	u32 tx_slot;
+
+	if (!priv->omsg_init[mbox] ||
+	    len > TSI721_MSG_MAX_SIZE || len < 8)
+		return -EINVAL;
+
+	tx_slot = priv->omsg_ring[mbox].tx_slot;
+
+	/* Copy message into transfer buffer */
+	memcpy(priv->omsg_ring[mbox].omq_base[tx_slot], buffer, len);
+
+	if (len & 0x7)
+		len += 8;
+
+	/* Build descriptor associated with buffer */
+	desc = priv->omsg_ring[mbox].omd_base;
+	desc[tx_slot].type_id = cpu_to_le32((DTYPE4 << 29) | rdev->destid);
+	if (tx_slot % 4 == 0)
+		desc[tx_slot].type_id |= cpu_to_le32(TSI721_OMD_IOF);
+
+	desc[tx_slot].msg_info =
+		cpu_to_le32((mport->sys_size << 26) | (mbox << 22) |
+			    (0xe << 12) | (len & 0xff8));
+	desc[tx_slot].bufptr_lo =
+		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] &
+			    0xffffffff);
+	desc[tx_slot].bufptr_hi =
+		cpu_to_le32((u64)priv->omsg_ring[mbox].omq_phys[tx_slot] >> 32);
+
+	priv->omsg_ring[mbox].wr_count++;
+
+	/* Go to next descriptor */
+	if (++priv->omsg_ring[mbox].tx_slot == priv->omsg_ring[mbox].size) {
+		priv->omsg_ring[mbox].tx_slot = 0;
+		/* Move through the ring link descriptor at the end */
+		priv->omsg_ring[mbox].wr_count++;
+	}
+
+	mb();
+
+	/* Set new write count value */
+	iowrite32(priv->omsg_ring[mbox].wr_count,
+		priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
+	ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox));
+
+	return 0;
+}
+
+/**
+ * tsi721_omsg_handler - Outbound Message Interrupt Handler
+ * @priv: pointer to tsi721 private data
+ * @ch: number of OB MSG channel to service
+ *
+ * Services channel interrupts from outbound messaging engine.
+ */
+static void tsi721_omsg_handler(struct tsi721_device *priv, int ch)
+{
+	u32 omsg_int;
+
+	spin_lock(&priv->omsg_ring[ch].lock);
+
+	omsg_int = ioread32(priv->regs + TSI721_OBDMAC_INT(ch));
+
+	if (omsg_int & TSI721_OBDMAC_INT_ST_FULL)
+		dev_info(&priv->pdev->dev,
+			"OB MBOX%d: Status FIFO is full\n", ch);
+
+	if (omsg_int & (TSI721_OBDMAC_INT_DONE | TSI721_OBDMAC_INT_IOF_DONE)) {
+		u32 srd_ptr;
+		u64 *sts_ptr, last_ptr = 0, prev_ptr = 0;
+		int i, j;
+		u32 tx_slot;
+
+		/*
+		 * Find last successfully processed descriptor
+		 */
+
+		/* Check and clear descriptor status FIFO entries */
+		srd_ptr = priv->omsg_ring[ch].sts_rdptr;
+		sts_ptr = priv->omsg_ring[ch].sts_base;
+		j = srd_ptr * 8;
+		while (sts_ptr[j]) {
+			for (i = 0; i < 8 && sts_ptr[j]; i++, j++) {
+				prev_ptr = last_ptr;
+				last_ptr = le64_to_cpu(sts_ptr[j]);
+				sts_ptr[j] = 0;
+			}
+
+			++srd_ptr;
+			srd_ptr %= priv->omsg_ring[ch].sts_size;
+			j = srd_ptr * 8;
+		}
+
+		if (last_ptr == 0)
+			goto no_sts_update;
+
+		priv->omsg_ring[ch].sts_rdptr = srd_ptr;
+		iowrite32(srd_ptr, priv->regs + TSI721_OBDMAC_DSRP(ch));
+
+		if (!priv->mport->outb_msg[ch].mcback)
+			goto no_sts_update;
+
+		/* Inform upper layer about transfer completion */
+
+		tx_slot = (last_ptr - (u64)priv->omsg_ring[ch].omd_phys)/
+						sizeof(struct tsi721_omsg_desc);
+
+		/*
+		 * Check if this is a Link Descriptor (LD).
+		 * If yes, ignore LD and use descriptor processed
+		 * before LD.
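+		 * Example: with a ring of 4 entries, a completion pointer
+		 * that resolves to tx_slot == 4 names the link descriptor,
+		 * so prev_ptr (the last data descriptor processed) is used
+		 * instead.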
+ */ + if (tx_slot == priv->omsg_ring[ch].size) { + if (prev_ptr) + tx_slot = (prev_ptr - + (u64)priv->omsg_ring[ch].omd_phys)/ + sizeof(struct tsi721_omsg_desc); + else + goto no_sts_update; + } + + /* Move slot index to the next message to be sent */ + ++tx_slot; + if (tx_slot == priv->omsg_ring[ch].size) + tx_slot = 0; + BUG_ON(tx_slot >= priv->omsg_ring[ch].size); + priv->mport->outb_msg[ch].mcback(priv->mport, + priv->omsg_ring[ch].dev_id, ch, + tx_slot); + } + +no_sts_update: + + if (omsg_int & TSI721_OBDMAC_INT_ERROR) { + /* + * Outbound message operation aborted due to error, + * reinitialize OB MSG channel + */ + + dev_dbg(&priv->pdev->dev, "OB MSG ABORT ch_stat=%x\n", + ioread32(priv->regs + TSI721_OBDMAC_STS(ch))); + + iowrite32(TSI721_OBDMAC_INT_ERROR, + priv->regs + TSI721_OBDMAC_INT(ch)); + iowrite32(TSI721_OBDMAC_CTL_INIT, + priv->regs + TSI721_OBDMAC_CTL(ch)); + ioread32(priv->regs + TSI721_OBDMAC_CTL(ch)); + + /* Inform upper level to clear all pending tx slots */ + if (priv->mport->outb_msg[ch].mcback) + priv->mport->outb_msg[ch].mcback(priv->mport, + priv->omsg_ring[ch].dev_id, ch, + priv->omsg_ring[ch].tx_slot); + /* Synch tx_slot tracking */ + iowrite32(priv->omsg_ring[ch].tx_slot, + priv->regs + TSI721_OBDMAC_DRDCNT(ch)); + ioread32(priv->regs + TSI721_OBDMAC_DRDCNT(ch)); + priv->omsg_ring[ch].wr_count = priv->omsg_ring[ch].tx_slot; + priv->omsg_ring[ch].sts_rdptr = 0; + } + + /* Clear channel interrupts */ + iowrite32(omsg_int, priv->regs + TSI721_OBDMAC_INT(ch)); + + if (!(priv->flags & TSI721_USING_MSIX)) { + u32 ch_inte; + + /* Re-enable channel interrupts */ + ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + ch_inte |= TSI721_INT_OMSG_CHAN(ch); + iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); + } + + spin_unlock(&priv->omsg_ring[ch].lock); +} + +/** + * tsi721_open_outb_mbox - Initialize Tsi721 outbound mailbox + * @mport: Master port implementing Outbound Messaging Engine + * @dev_id: Device specific pointer to pass on event + * @mbox: Mailbox to open + * @entries: Number of entries in the outbound mailbox ring + */ +static int tsi721_open_outb_mbox(struct rio_mport *mport, void *dev_id, + int mbox, int entries) +{ + struct tsi721_device *priv = mport->priv; + struct tsi721_omsg_desc *bd_ptr; + int i, rc = 0; + + if ((entries < TSI721_OMSGD_MIN_RING_SIZE) || + (entries > (TSI721_OMSGD_RING_SIZE)) || + (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) { + rc = -EINVAL; + goto out; + } + + priv->omsg_ring[mbox].dev_id = dev_id; + priv->omsg_ring[mbox].size = entries; + priv->omsg_ring[mbox].sts_rdptr = 0; + spin_lock_init(&priv->omsg_ring[mbox].lock); + + /* Outbound Msg Buffer allocation based on + the number of maximum descriptor entries */ + for (i = 0; i < entries; i++) { + priv->omsg_ring[mbox].omq_base[i] = + dma_alloc_coherent( + &priv->pdev->dev, TSI721_MSG_BUFFER_SIZE, + &priv->omsg_ring[mbox].omq_phys[i], + GFP_KERNEL); + if (priv->omsg_ring[mbox].omq_base[i] == NULL) { + dev_dbg(&priv->pdev->dev, + "Unable to allocate OB MSG data buffer for" + " MBOX%d\n", mbox); + rc = -ENOMEM; + goto out_buf; + } + } + + /* Outbound message descriptor allocation */ + priv->omsg_ring[mbox].omd_base = dma_alloc_coherent( + &priv->pdev->dev, + (entries + 1) * sizeof(struct tsi721_omsg_desc), + &priv->omsg_ring[mbox].omd_phys, GFP_KERNEL); + if (priv->omsg_ring[mbox].omd_base == NULL) { + dev_dbg(&priv->pdev->dev, + "Unable to allocate OB MSG descriptor memory " + "for MBOX%d\n", mbox); + rc = -ENOMEM; + goto out_buf; + } + + priv->omsg_ring[mbox].tx_slot 
= 0; + + /* Outbound message descriptor status FIFO allocation */ + priv->omsg_ring[mbox].sts_size = roundup_pow_of_two(entries + 1); + priv->omsg_ring[mbox].sts_base = dma_zalloc_coherent(&priv->pdev->dev, + priv->omsg_ring[mbox].sts_size * + sizeof(struct tsi721_dma_sts), + &priv->omsg_ring[mbox].sts_phys, GFP_KERNEL); + if (priv->omsg_ring[mbox].sts_base == NULL) { + dev_dbg(&priv->pdev->dev, + "Unable to allocate OB MSG descriptor status FIFO " + "for MBOX%d\n", mbox); + rc = -ENOMEM; + goto out_desc; + } + + /* + * Configure Outbound Messaging Engine + */ + + /* Setup Outbound Message descriptor pointer */ + iowrite32(((u64)priv->omsg_ring[mbox].omd_phys >> 32), + priv->regs + TSI721_OBDMAC_DPTRH(mbox)); + iowrite32(((u64)priv->omsg_ring[mbox].omd_phys & + TSI721_OBDMAC_DPTRL_MASK), + priv->regs + TSI721_OBDMAC_DPTRL(mbox)); + + /* Setup Outbound Message descriptor status FIFO */ + iowrite32(((u64)priv->omsg_ring[mbox].sts_phys >> 32), + priv->regs + TSI721_OBDMAC_DSBH(mbox)); + iowrite32(((u64)priv->omsg_ring[mbox].sts_phys & + TSI721_OBDMAC_DSBL_MASK), + priv->regs + TSI721_OBDMAC_DSBL(mbox)); + iowrite32(TSI721_DMAC_DSSZ_SIZE(priv->omsg_ring[mbox].sts_size), + priv->regs + (u32)TSI721_OBDMAC_DSSZ(mbox)); + + /* Enable interrupts */ + +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + /* Request interrupt service if we are in MSI-X mode */ + rc = request_irq( + priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, + tsi721_omsg_msix, 0, + priv->msix[TSI721_VECT_OMB0_DONE + mbox].irq_name, + (void *)mport); + + if (rc) { + dev_dbg(&priv->pdev->dev, + "Unable to allocate MSI-X interrupt for " + "OBOX%d-DONE\n", mbox); + goto out_stat; + } + + rc = request_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector, + tsi721_omsg_msix, 0, + priv->msix[TSI721_VECT_OMB0_INT + mbox].irq_name, + (void *)mport); + + if (rc) { + dev_dbg(&priv->pdev->dev, + "Unable to allocate MSI-X interrupt for " + "MBOX%d-INT\n", mbox); + free_irq( + priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, + (void *)mport); + goto out_stat; + } + } +#endif /* CONFIG_PCI_MSI */ + + tsi721_omsg_interrupt_enable(priv, mbox, TSI721_OBDMAC_INT_ALL); + + /* Initialize Outbound Message descriptors ring */ + bd_ptr = priv->omsg_ring[mbox].omd_base; + bd_ptr[entries].type_id = cpu_to_le32(DTYPE5 << 29); + bd_ptr[entries].msg_info = 0; + bd_ptr[entries].next_lo = + cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys & + TSI721_OBDMAC_DPTRL_MASK); + bd_ptr[entries].next_hi = + cpu_to_le32((u64)priv->omsg_ring[mbox].omd_phys >> 32); + priv->omsg_ring[mbox].wr_count = 0; + mb(); + + /* Initialize Outbound Message engine */ + iowrite32(TSI721_OBDMAC_CTL_INIT, priv->regs + TSI721_OBDMAC_CTL(mbox)); + ioread32(priv->regs + TSI721_OBDMAC_DWRCNT(mbox)); + udelay(10); + + priv->omsg_init[mbox] = 1; + + return 0; + +#ifdef CONFIG_PCI_MSI +out_stat: + dma_free_coherent(&priv->pdev->dev, + priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), + priv->omsg_ring[mbox].sts_base, + priv->omsg_ring[mbox].sts_phys); + + priv->omsg_ring[mbox].sts_base = NULL; +#endif /* CONFIG_PCI_MSI */ + +out_desc: + dma_free_coherent(&priv->pdev->dev, + (entries + 1) * sizeof(struct tsi721_omsg_desc), + priv->omsg_ring[mbox].omd_base, + priv->omsg_ring[mbox].omd_phys); + + priv->omsg_ring[mbox].omd_base = NULL; + +out_buf: + for (i = 0; i < priv->omsg_ring[mbox].size; i++) { + if (priv->omsg_ring[mbox].omq_base[i]) { + dma_free_coherent(&priv->pdev->dev, + TSI721_MSG_BUFFER_SIZE, + priv->omsg_ring[mbox].omq_base[i], + 
priv->omsg_ring[mbox].omq_phys[i]); + + priv->omsg_ring[mbox].omq_base[i] = NULL; + } + } + +out: + return rc; +} + +/** + * tsi721_close_outb_mbox - Close Tsi721 outbound mailbox + * @mport: Master port implementing the outbound message unit + * @mbox: Mailbox to close + */ +static void tsi721_close_outb_mbox(struct rio_mport *mport, int mbox) +{ + struct tsi721_device *priv = mport->priv; + u32 i; + + if (!priv->omsg_init[mbox]) + return; + priv->omsg_init[mbox] = 0; + + /* Disable Interrupts */ + + tsi721_omsg_interrupt_disable(priv, mbox, TSI721_OBDMAC_INT_ALL); + +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + free_irq(priv->msix[TSI721_VECT_OMB0_DONE + mbox].vector, + (void *)mport); + free_irq(priv->msix[TSI721_VECT_OMB0_INT + mbox].vector, + (void *)mport); + } +#endif /* CONFIG_PCI_MSI */ + + /* Free OMSG Descriptor Status FIFO */ + dma_free_coherent(&priv->pdev->dev, + priv->omsg_ring[mbox].sts_size * sizeof(struct tsi721_dma_sts), + priv->omsg_ring[mbox].sts_base, + priv->omsg_ring[mbox].sts_phys); + + priv->omsg_ring[mbox].sts_base = NULL; + + /* Free OMSG descriptors */ + dma_free_coherent(&priv->pdev->dev, + (priv->omsg_ring[mbox].size + 1) * + sizeof(struct tsi721_omsg_desc), + priv->omsg_ring[mbox].omd_base, + priv->omsg_ring[mbox].omd_phys); + + priv->omsg_ring[mbox].omd_base = NULL; + + /* Free message buffers */ + for (i = 0; i < priv->omsg_ring[mbox].size; i++) { + if (priv->omsg_ring[mbox].omq_base[i]) { + dma_free_coherent(&priv->pdev->dev, + TSI721_MSG_BUFFER_SIZE, + priv->omsg_ring[mbox].omq_base[i], + priv->omsg_ring[mbox].omq_phys[i]); + + priv->omsg_ring[mbox].omq_base[i] = NULL; + } + } +} + +/** + * tsi721_imsg_handler - Inbound Message Interrupt Handler + * @priv: pointer to tsi721 private data + * @ch: inbound message channel number to service + * + * Services channel interrupts from inbound messaging engine. 
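+ *
+ * Note: inbound mailboxes 0..3 are carried on messaging channels 4..7,
+ * hence the "mbox = ch - 4" conversion at the top of this handler.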
+ */ +static void tsi721_imsg_handler(struct tsi721_device *priv, int ch) +{ + u32 mbox = ch - 4; + u32 imsg_int; + + spin_lock(&priv->imsg_ring[mbox].lock); + + imsg_int = ioread32(priv->regs + TSI721_IBDMAC_INT(ch)); + + if (imsg_int & TSI721_IBDMAC_INT_SRTO) + dev_info(&priv->pdev->dev, "IB MBOX%d SRIO timeout\n", + mbox); + + if (imsg_int & TSI721_IBDMAC_INT_PC_ERROR) + dev_info(&priv->pdev->dev, "IB MBOX%d PCIe error\n", + mbox); + + if (imsg_int & TSI721_IBDMAC_INT_FQ_LOW) + dev_info(&priv->pdev->dev, + "IB MBOX%d IB free queue low\n", mbox); + + /* Clear IB channel interrupts */ + iowrite32(imsg_int, priv->regs + TSI721_IBDMAC_INT(ch)); + + /* If an IB Msg is received notify the upper layer */ + if (imsg_int & TSI721_IBDMAC_INT_DQ_RCV && + priv->mport->inb_msg[mbox].mcback) + priv->mport->inb_msg[mbox].mcback(priv->mport, + priv->imsg_ring[mbox].dev_id, mbox, -1); + + if (!(priv->flags & TSI721_USING_MSIX)) { + u32 ch_inte; + + /* Re-enable channel interrupts */ + ch_inte = ioread32(priv->regs + TSI721_DEV_CHAN_INTE); + ch_inte |= TSI721_INT_IMSG_CHAN(ch); + iowrite32(ch_inte, priv->regs + TSI721_DEV_CHAN_INTE); + } + + spin_unlock(&priv->imsg_ring[mbox].lock); +} + +/** + * tsi721_open_inb_mbox - Initialize Tsi721 inbound mailbox + * @mport: Master port implementing the Inbound Messaging Engine + * @dev_id: Device specific pointer to pass on event + * @mbox: Mailbox to open + * @entries: Number of entries in the inbound mailbox ring + */ +static int tsi721_open_inb_mbox(struct rio_mport *mport, void *dev_id, + int mbox, int entries) +{ + struct tsi721_device *priv = mport->priv; + int ch = mbox + 4; + int i; + u64 *free_ptr; + int rc = 0; + + if ((entries < TSI721_IMSGD_MIN_RING_SIZE) || + (entries > TSI721_IMSGD_RING_SIZE) || + (!is_power_of_2(entries)) || mbox >= RIO_MAX_MBOX) { + rc = -EINVAL; + goto out; + } + + /* Initialize IB Messaging Ring */ + priv->imsg_ring[mbox].dev_id = dev_id; + priv->imsg_ring[mbox].size = entries; + priv->imsg_ring[mbox].rx_slot = 0; + priv->imsg_ring[mbox].desc_rdptr = 0; + priv->imsg_ring[mbox].fq_wrptr = 0; + for (i = 0; i < priv->imsg_ring[mbox].size; i++) + priv->imsg_ring[mbox].imq_base[i] = NULL; + spin_lock_init(&priv->imsg_ring[mbox].lock); + + /* Allocate buffers for incoming messages */ + priv->imsg_ring[mbox].buf_base = + dma_alloc_coherent(&priv->pdev->dev, + entries * TSI721_MSG_BUFFER_SIZE, + &priv->imsg_ring[mbox].buf_phys, + GFP_KERNEL); + + if (priv->imsg_ring[mbox].buf_base == NULL) { + dev_err(&priv->pdev->dev, + "Failed to allocate buffers for IB MBOX%d\n", mbox); + rc = -ENOMEM; + goto out; + } + + /* Allocate memory for circular free list */ + priv->imsg_ring[mbox].imfq_base = + dma_alloc_coherent(&priv->pdev->dev, + entries * 8, + &priv->imsg_ring[mbox].imfq_phys, + GFP_KERNEL); + + if (priv->imsg_ring[mbox].imfq_base == NULL) { + dev_err(&priv->pdev->dev, + "Failed to allocate free queue for IB MBOX%d\n", mbox); + rc = -ENOMEM; + goto out_buf; + } + + /* Allocate memory for Inbound message descriptors */ + priv->imsg_ring[mbox].imd_base = + dma_alloc_coherent(&priv->pdev->dev, + entries * sizeof(struct tsi721_imsg_desc), + &priv->imsg_ring[mbox].imd_phys, GFP_KERNEL); + + if (priv->imsg_ring[mbox].imd_base == NULL) { + dev_err(&priv->pdev->dev, + "Failed to allocate descriptor memory for IB MBOX%d\n", + mbox); + rc = -ENOMEM; + goto out_dma; + } + + /* Fill free buffer pointer list */ + free_ptr = priv->imsg_ring[mbox].imfq_base; + for (i = 0; i < entries; i++) + free_ptr[i] = cpu_to_le64( + 
(u64)(priv->imsg_ring[mbox].buf_phys) + + i * 0x1000); + + mb(); + + /* + * For mapping of inbound SRIO Messages into appropriate queues we need + * to set Inbound Device ID register in the messaging engine. We do it + * once when first inbound mailbox is requested. + */ + if (!(priv->flags & TSI721_IMSGID_SET)) { + iowrite32((u32)priv->mport->host_deviceid, + priv->regs + TSI721_IB_DEVID); + priv->flags |= TSI721_IMSGID_SET; + } + + /* + * Configure Inbound Messaging channel (ch = mbox + 4) + */ + + /* Setup Inbound Message free queue */ + iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys >> 32), + priv->regs + TSI721_IBDMAC_FQBH(ch)); + iowrite32(((u64)priv->imsg_ring[mbox].imfq_phys & + TSI721_IBDMAC_FQBL_MASK), + priv->regs+TSI721_IBDMAC_FQBL(ch)); + iowrite32(TSI721_DMAC_DSSZ_SIZE(entries), + priv->regs + TSI721_IBDMAC_FQSZ(ch)); + + /* Setup Inbound Message descriptor queue */ + iowrite32(((u64)priv->imsg_ring[mbox].imd_phys >> 32), + priv->regs + TSI721_IBDMAC_DQBH(ch)); + iowrite32(((u32)priv->imsg_ring[mbox].imd_phys & + (u32)TSI721_IBDMAC_DQBL_MASK), + priv->regs+TSI721_IBDMAC_DQBL(ch)); + iowrite32(TSI721_DMAC_DSSZ_SIZE(entries), + priv->regs + TSI721_IBDMAC_DQSZ(ch)); + + /* Enable interrupts */ + +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + /* Request interrupt service if we are in MSI-X mode */ + rc = request_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector, + tsi721_imsg_msix, 0, + priv->msix[TSI721_VECT_IMB0_RCV + mbox].irq_name, + (void *)mport); + + if (rc) { + dev_dbg(&priv->pdev->dev, + "Unable to allocate MSI-X interrupt for " + "IBOX%d-DONE\n", mbox); + goto out_desc; + } + + rc = request_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector, + tsi721_imsg_msix, 0, + priv->msix[TSI721_VECT_IMB0_INT + mbox].irq_name, + (void *)mport); + + if (rc) { + dev_dbg(&priv->pdev->dev, + "Unable to allocate MSI-X interrupt for " + "IBOX%d-INT\n", mbox); + free_irq( + priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector, + (void *)mport); + goto out_desc; + } + } +#endif /* CONFIG_PCI_MSI */ + + tsi721_imsg_interrupt_enable(priv, ch, TSI721_IBDMAC_INT_ALL); + + /* Initialize Inbound Message Engine */ + iowrite32(TSI721_IBDMAC_CTL_INIT, priv->regs + TSI721_IBDMAC_CTL(ch)); + ioread32(priv->regs + TSI721_IBDMAC_CTL(ch)); + udelay(10); + priv->imsg_ring[mbox].fq_wrptr = entries - 1; + iowrite32(entries - 1, priv->regs + TSI721_IBDMAC_FQWP(ch)); + + priv->imsg_init[mbox] = 1; + return 0; + +#ifdef CONFIG_PCI_MSI +out_desc: + dma_free_coherent(&priv->pdev->dev, + priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc), + priv->imsg_ring[mbox].imd_base, + priv->imsg_ring[mbox].imd_phys); + + priv->imsg_ring[mbox].imd_base = NULL; +#endif /* CONFIG_PCI_MSI */ + +out_dma: + dma_free_coherent(&priv->pdev->dev, + priv->imsg_ring[mbox].size * 8, + priv->imsg_ring[mbox].imfq_base, + priv->imsg_ring[mbox].imfq_phys); + + priv->imsg_ring[mbox].imfq_base = NULL; + +out_buf: + dma_free_coherent(&priv->pdev->dev, + priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE, + priv->imsg_ring[mbox].buf_base, + priv->imsg_ring[mbox].buf_phys); + + priv->imsg_ring[mbox].buf_base = NULL; + +out: + return rc; +} + +/** + * tsi721_close_inb_mbox - Shut down Tsi721 inbound mailbox + * @mport: Master port implementing the Inbound Messaging Engine + * @mbox: Mailbox to close + */ +static void tsi721_close_inb_mbox(struct rio_mport *mport, int mbox) +{ + struct tsi721_device *priv = mport->priv; + u32 rx_slot; + int ch = mbox + 4; + + if (!priv->imsg_init[mbox]) /* mbox isn't initialized yet */ + 
return;
+ priv->imsg_init[mbox] = 0;
+
+ /* Disable Inbound Messaging Engine */
+
+ /* Disable Interrupts */
+ tsi721_imsg_interrupt_disable(priv, ch, TSI721_IBDMAC_INT_MASK);
+
+#ifdef CONFIG_PCI_MSI
+ if (priv->flags & TSI721_USING_MSIX) {
+ free_irq(priv->msix[TSI721_VECT_IMB0_RCV + mbox].vector,
+ (void *)mport);
+ free_irq(priv->msix[TSI721_VECT_IMB0_INT + mbox].vector,
+ (void *)mport);
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ /* Clear Inbound Buffer Queue */
+ for (rx_slot = 0; rx_slot < priv->imsg_ring[mbox].size; rx_slot++)
+ priv->imsg_ring[mbox].imq_base[rx_slot] = NULL;
+
+ /* Free memory allocated for message buffers */
+ dma_free_coherent(&priv->pdev->dev,
+ priv->imsg_ring[mbox].size * TSI721_MSG_BUFFER_SIZE,
+ priv->imsg_ring[mbox].buf_base,
+ priv->imsg_ring[mbox].buf_phys);
+
+ priv->imsg_ring[mbox].buf_base = NULL;
+
+ /* Free memory allocated for free pointer list */
+ dma_free_coherent(&priv->pdev->dev,
+ priv->imsg_ring[mbox].size * 8,
+ priv->imsg_ring[mbox].imfq_base,
+ priv->imsg_ring[mbox].imfq_phys);
+
+ priv->imsg_ring[mbox].imfq_base = NULL;
+
+ /* Free memory allocated for RX descriptors */
+ dma_free_coherent(&priv->pdev->dev,
+ priv->imsg_ring[mbox].size * sizeof(struct tsi721_imsg_desc),
+ priv->imsg_ring[mbox].imd_base,
+ priv->imsg_ring[mbox].imd_phys);
+
+ priv->imsg_ring[mbox].imd_base = NULL;
+}
+
+/**
+ * tsi721_add_inb_buffer - Add buffer to the Tsi721 inbound message queue
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @mbox: Inbound mailbox number
+ * @buf: Buffer to add to inbound queue
+ */
+static int tsi721_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf)
+{
+ struct tsi721_device *priv = mport->priv;
+ u32 rx_slot;
+ int rc = 0;
+
+ rx_slot = priv->imsg_ring[mbox].rx_slot;
+ if (priv->imsg_ring[mbox].imq_base[rx_slot]) {
+ dev_err(&priv->pdev->dev,
+ "Error adding inbound buffer %d, buffer exists\n",
+ rx_slot);
+ rc = -EINVAL;
+ goto out;
+ }
+
+ priv->imsg_ring[mbox].imq_base[rx_slot] = buf;
+
+ if (++priv->imsg_ring[mbox].rx_slot == priv->imsg_ring[mbox].size)
+ priv->imsg_ring[mbox].rx_slot = 0;
+
+out:
+ return rc;
+}
+
+/**
+ * tsi721_get_inb_message - Fetch inbound message from the Tsi721 MSG Queue
+ * @mport: Master port implementing the Inbound Messaging Engine
+ * @mbox: Inbound mailbox number
+ *
+ * Returns pointer to the message on success or NULL on failure.
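+ * The payload is copied out of the hardware DMA buffer into the buffer
+ * previously queued with tsi721_add_inb_buffer(), and the hardware
+ * buffer is recycled into the inbound free queue.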
+ */ +static void *tsi721_get_inb_message(struct rio_mport *mport, int mbox) +{ + struct tsi721_device *priv = mport->priv; + struct tsi721_imsg_desc *desc; + u32 rx_slot; + void *rx_virt = NULL; + u64 rx_phys; + void *buf = NULL; + u64 *free_ptr; + int ch = mbox + 4; + int msg_size; + + if (!priv->imsg_init[mbox]) + return NULL; + + desc = priv->imsg_ring[mbox].imd_base; + desc += priv->imsg_ring[mbox].desc_rdptr; + + if (!(le32_to_cpu(desc->msg_info) & TSI721_IMD_HO)) + goto out; + + rx_slot = priv->imsg_ring[mbox].rx_slot; + while (priv->imsg_ring[mbox].imq_base[rx_slot] == NULL) { + if (++rx_slot == priv->imsg_ring[mbox].size) + rx_slot = 0; + } + + rx_phys = ((u64)le32_to_cpu(desc->bufptr_hi) << 32) | + le32_to_cpu(desc->bufptr_lo); + + rx_virt = priv->imsg_ring[mbox].buf_base + + (rx_phys - (u64)priv->imsg_ring[mbox].buf_phys); + + buf = priv->imsg_ring[mbox].imq_base[rx_slot]; + msg_size = le32_to_cpu(desc->msg_info) & TSI721_IMD_BCOUNT; + if (msg_size == 0) + msg_size = RIO_MAX_MSG_SIZE; + + memcpy(buf, rx_virt, msg_size); + priv->imsg_ring[mbox].imq_base[rx_slot] = NULL; + + desc->msg_info &= cpu_to_le32(~TSI721_IMD_HO); + if (++priv->imsg_ring[mbox].desc_rdptr == priv->imsg_ring[mbox].size) + priv->imsg_ring[mbox].desc_rdptr = 0; + + iowrite32(priv->imsg_ring[mbox].desc_rdptr, + priv->regs + TSI721_IBDMAC_DQRP(ch)); + + /* Return free buffer into the pointer list */ + free_ptr = priv->imsg_ring[mbox].imfq_base; + free_ptr[priv->imsg_ring[mbox].fq_wrptr] = cpu_to_le64(rx_phys); + + if (++priv->imsg_ring[mbox].fq_wrptr == priv->imsg_ring[mbox].size) + priv->imsg_ring[mbox].fq_wrptr = 0; + + iowrite32(priv->imsg_ring[mbox].fq_wrptr, + priv->regs + TSI721_IBDMAC_FQWP(ch)); +out: + return buf; +} + +/** + * tsi721_messages_init - Initialization of Messaging Engine + * @priv: pointer to tsi721 private data + * + * Configures Tsi721 messaging engine. 
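+ * Clears the ECC error logs and retry counters, programs the SRIO
+ * request/response timeout, and resets the interrupt and status
+ * registers of every inbound messaging channel.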
+ */ +static int tsi721_messages_init(struct tsi721_device *priv) +{ + int ch; + + iowrite32(0, priv->regs + TSI721_SMSG_ECC_LOG); + iowrite32(0, priv->regs + TSI721_RETRY_GEN_CNT); + iowrite32(0, priv->regs + TSI721_RETRY_RX_CNT); + + /* Set SRIO Message Request/Response Timeout */ + iowrite32(TSI721_RQRPTO_VAL, priv->regs + TSI721_RQRPTO); + + /* Initialize Inbound Messaging Engine Registers */ + for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) { + /* Clear interrupt bits */ + iowrite32(TSI721_IBDMAC_INT_MASK, + priv->regs + TSI721_IBDMAC_INT(ch)); + /* Clear Status */ + iowrite32(0, priv->regs + TSI721_IBDMAC_STS(ch)); + + iowrite32(TSI721_SMSG_ECC_COR_LOG_MASK, + priv->regs + TSI721_SMSG_ECC_COR_LOG(ch)); + iowrite32(TSI721_SMSG_ECC_NCOR_MASK, + priv->regs + TSI721_SMSG_ECC_NCOR(ch)); + } + + return 0; +} + +/** + * tsi721_disable_ints - disables all device interrupts + * @priv: pointer to tsi721 private data + */ +static void tsi721_disable_ints(struct tsi721_device *priv) +{ + int ch; + + /* Disable all device level interrupts */ + iowrite32(0, priv->regs + TSI721_DEV_INTE); + + /* Disable all Device Channel interrupts */ + iowrite32(0, priv->regs + TSI721_DEV_CHAN_INTE); + + /* Disable all Inbound Msg Channel interrupts */ + for (ch = 0; ch < TSI721_IMSG_CHNUM; ch++) + iowrite32(0, priv->regs + TSI721_IBDMAC_INTE(ch)); + + /* Disable all Outbound Msg Channel interrupts */ + for (ch = 0; ch < TSI721_OMSG_CHNUM; ch++) + iowrite32(0, priv->regs + TSI721_OBDMAC_INTE(ch)); + + /* Disable all general messaging interrupts */ + iowrite32(0, priv->regs + TSI721_SMSG_INTE); + + /* Disable all BDMA Channel interrupts */ + for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) + iowrite32(0, + priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE); + + /* Disable all general BDMA interrupts */ + iowrite32(0, priv->regs + TSI721_BDMA_INTE); + + /* Disable all SRIO Channel interrupts */ + for (ch = 0; ch < TSI721_SRIO_MAXCH; ch++) + iowrite32(0, priv->regs + TSI721_SR_CHINTE(ch)); + + /* Disable all general SR2PC interrupts */ + iowrite32(0, priv->regs + TSI721_SR2PC_GEN_INTE); + + /* Disable all PC2SR interrupts */ + iowrite32(0, priv->regs + TSI721_PC2SR_INTE); + + /* Disable all I2C interrupts */ + iowrite32(0, priv->regs + TSI721_I2C_INT_ENABLE); + + /* Disable SRIO MAC interrupts */ + iowrite32(0, priv->regs + TSI721_RIO_EM_INT_ENABLE); + iowrite32(0, priv->regs + TSI721_RIO_EM_DEV_INT_EN); +} + +/** + * tsi721_setup_mport - Setup Tsi721 as RapidIO subsystem master port + * @priv: pointer to tsi721 private data + * + * Configures Tsi721 as RapidIO master port. 
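+ * Allocates and fills the rio_ops and rio_mport structures, hooks up the
+ * interrupt handlers (MSI-X, MSI or legacy INTx), enables the SRIO link
+ * and registers the master port with the RapidIO subsystem.
+ *
+ * Returns 0 on success or a negative error code on failure.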
+ */ +static int tsi721_setup_mport(struct tsi721_device *priv) +{ + struct pci_dev *pdev = priv->pdev; + int err = 0; + struct rio_ops *ops; + + struct rio_mport *mport; + + ops = kzalloc(sizeof(struct rio_ops), GFP_KERNEL); + if (!ops) { + dev_dbg(&pdev->dev, "Unable to allocate memory for rio_ops\n"); + return -ENOMEM; + } + + ops->lcread = tsi721_lcread; + ops->lcwrite = tsi721_lcwrite; + ops->cread = tsi721_cread_dma; + ops->cwrite = tsi721_cwrite_dma; + ops->dsend = tsi721_dsend; + ops->open_inb_mbox = tsi721_open_inb_mbox; + ops->close_inb_mbox = tsi721_close_inb_mbox; + ops->open_outb_mbox = tsi721_open_outb_mbox; + ops->close_outb_mbox = tsi721_close_outb_mbox; + ops->add_outb_message = tsi721_add_outb_message; + ops->add_inb_buffer = tsi721_add_inb_buffer; + ops->get_inb_message = tsi721_get_inb_message; + ops->map_inb = tsi721_rio_map_inb_mem; + ops->unmap_inb = tsi721_rio_unmap_inb_mem; + + mport = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); + if (!mport) { + kfree(ops); + dev_dbg(&pdev->dev, "Unable to allocate memory for mport\n"); + return -ENOMEM; + } + + mport->ops = ops; + mport->index = 0; + mport->sys_size = 0; /* small system */ + mport->phy_type = RIO_PHY_SERIAL; + mport->priv = (void *)priv; + mport->phys_efptr = 0x100; + mport->dev.parent = &pdev->dev; + priv->mport = mport; + + INIT_LIST_HEAD(&mport->dbells); + + rio_init_dbell_res(&mport->riores[RIO_DOORBELL_RESOURCE], 0, 0xffff); + rio_init_mbox_res(&mport->riores[RIO_INB_MBOX_RESOURCE], 0, 3); + rio_init_mbox_res(&mport->riores[RIO_OUTB_MBOX_RESOURCE], 0, 3); + snprintf(mport->name, RIO_MAX_MPORT_NAME, "%s(%s)", + dev_driver_string(&pdev->dev), dev_name(&pdev->dev)); + + /* Hook up interrupt handler */ + +#ifdef CONFIG_PCI_MSI + if (!tsi721_enable_msix(priv)) + priv->flags |= TSI721_USING_MSIX; + else if (!pci_enable_msi(pdev)) + priv->flags |= TSI721_USING_MSI; + else + dev_info(&pdev->dev, + "MSI/MSI-X is not available. 
Using legacy INTx.\n"); +#endif /* CONFIG_PCI_MSI */ + + err = tsi721_request_irq(mport); + + if (!err) { + tsi721_interrupts_init(priv); + ops->pwenable = tsi721_pw_enable; + } else { + dev_err(&pdev->dev, "Unable to get assigned PCI IRQ " + "vector %02X err=0x%x\n", pdev->irq, err); + goto err_exit; + } + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + tsi721_register_dma(priv); +#endif + /* Enable SRIO link */ + iowrite32(ioread32(priv->regs + TSI721_DEVCTL) | + TSI721_DEVCTL_SRBOOT_CMPL, + priv->regs + TSI721_DEVCTL); + + rio_register_mport(mport); + + if (mport->host_deviceid >= 0) + iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | + RIO_PORT_GEN_DISCOVERED, + priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); + else + iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); + + return 0; + +err_exit: + kfree(mport); + kfree(ops); + return err; +} + +static int tsi721_probe(struct pci_dev *pdev, + const struct pci_device_id *id) +{ + struct tsi721_device *priv; + int err; + + priv = kzalloc(sizeof(struct tsi721_device), GFP_KERNEL); + if (priv == NULL) { + dev_err(&pdev->dev, "Failed to allocate memory for device\n"); + err = -ENOMEM; + goto err_exit; + } + + err = pci_enable_device(pdev); + if (err) { + dev_err(&pdev->dev, "Failed to enable PCI device\n"); + goto err_clean; + } + + priv->pdev = pdev; + +#ifdef DEBUG + { + int i; + for (i = 0; i <= PCI_STD_RESOURCE_END; i++) { + dev_dbg(&pdev->dev, "res[%d] @ 0x%llx (0x%lx, 0x%lx)\n", + i, (unsigned long long)pci_resource_start(pdev, i), + (unsigned long)pci_resource_len(pdev, i), + pci_resource_flags(pdev, i)); + } + } +#endif + /* + * Verify BAR configuration + */ + + /* BAR_0 (registers) must be 512KB+ in 32-bit address space */ + if (!(pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM) || + pci_resource_flags(pdev, BAR_0) & IORESOURCE_MEM_64 || + pci_resource_len(pdev, BAR_0) < TSI721_REG_SPACE_SIZE) { + dev_err(&pdev->dev, + "Missing or misconfigured CSR BAR0, aborting.\n"); + err = -ENODEV; + goto err_disable_pdev; + } + + /* BAR_1 (outbound doorbells) must be 16MB+ in 32-bit address space */ + if (!(pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM) || + pci_resource_flags(pdev, BAR_1) & IORESOURCE_MEM_64 || + pci_resource_len(pdev, BAR_1) < TSI721_DB_WIN_SIZE) { + dev_err(&pdev->dev, + "Missing or misconfigured Doorbell BAR1, aborting.\n"); + err = -ENODEV; + goto err_disable_pdev; + } + + /* + * BAR_2 and BAR_4 (outbound translation) must be in 64-bit PCIe address + * space. + * NOTE: BAR_2 and BAR_4 are not used by this version of driver. + * It may be a good idea to keep them disabled using HW configuration + * to save PCI memory space. 
+ */ + if ((pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM) && + (pci_resource_flags(pdev, BAR_2) & IORESOURCE_MEM_64)) { + dev_info(&pdev->dev, "Outbound BAR2 is not used but enabled.\n"); + } + + if ((pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM) && + (pci_resource_flags(pdev, BAR_4) & IORESOURCE_MEM_64)) { + dev_info(&pdev->dev, "Outbound BAR4 is not used but enabled.\n"); + } + + err = pci_request_regions(pdev, DRV_NAME); + if (err) { + dev_err(&pdev->dev, "Cannot obtain PCI resources, " + "aborting.\n"); + goto err_disable_pdev; + } + + pci_set_master(pdev); + + priv->regs = pci_ioremap_bar(pdev, BAR_0); + if (!priv->regs) { + dev_err(&pdev->dev, + "Unable to map device registers space, aborting\n"); + err = -ENOMEM; + goto err_free_res; + } + + priv->odb_base = pci_ioremap_bar(pdev, BAR_1); + if (!priv->odb_base) { + dev_err(&pdev->dev, + "Unable to map outbound doorbells space, aborting\n"); + err = -ENOMEM; + goto err_unmap_bars; + } + + /* Configure DMA attributes. */ + if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); + if (err) { + dev_info(&pdev->dev, "Unable to set DMA mask\n"); + goto err_unmap_bars; + } + + if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) + dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); + } else { + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); + if (err) + dev_info(&pdev->dev, "Unable to set consistent DMA mask\n"); + } + + BUG_ON(!pci_is_pcie(pdev)); + + /* Clear "no snoop" and "relaxed ordering" bits, use default MRRS. */ + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL, + PCI_EXP_DEVCTL_READRQ | PCI_EXP_DEVCTL_RELAX_EN | + PCI_EXP_DEVCTL_NOSNOOP_EN, + PCI_EXP_DEVCTL_READRQ_512B); + + /* Adjust PCIe completion timeout. 
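+ * Writing 0x2 to the Completion Timeout Value field (PCI_EXP_DEVCTL2
+ * bits [3:0]) selects the 1 ms - 10 ms range of the PCIe Completion
+ * Timeout encoding, bounding the worst case at 10 ms instead of the
+ * default 50 ms.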
*/ + pcie_capability_clear_and_set_word(pdev, PCI_EXP_DEVCTL2, 0xf, 0x2); + + /* + * FIXUP: correct offsets of MSI-X tables in the MSI-X Capability Block + */ + pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0x01); + pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXTBL, + TSI721_MSIXTBL_OFFSET); + pci_write_config_dword(pdev, TSI721_PCIECFG_MSIXPBA, + TSI721_MSIXPBA_OFFSET); + pci_write_config_dword(pdev, TSI721_PCIECFG_EPCTL, 0); + /* End of FIXUP */ + + tsi721_disable_ints(priv); + + tsi721_init_pc2sr_mapping(priv); + tsi721_init_sr2pc_mapping(priv); + + if (tsi721_bdma_maint_init(priv)) { + dev_err(&pdev->dev, "BDMA initialization failed, aborting\n"); + err = -ENOMEM; + goto err_unmap_bars; + } + + err = tsi721_doorbell_init(priv); + if (err) + goto err_free_bdma; + + tsi721_port_write_init(priv); + + err = tsi721_messages_init(priv); + if (err) + goto err_free_consistent; + + err = tsi721_setup_mport(priv); + if (err) + goto err_free_consistent; + + return 0; + +err_free_consistent: + tsi721_doorbell_free(priv); +err_free_bdma: + tsi721_bdma_maint_free(priv); +err_unmap_bars: + if (priv->regs) + iounmap(priv->regs); + if (priv->odb_base) + iounmap(priv->odb_base); +err_free_res: + pci_release_regions(pdev); + pci_clear_master(pdev); +err_disable_pdev: + pci_disable_device(pdev); +err_clean: + kfree(priv); +err_exit: + return err; +} + +static const struct pci_device_id tsi721_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_TSI721) }, + { 0, } /* terminate list */ +}; + +MODULE_DEVICE_TABLE(pci, tsi721_pci_tbl); + +static struct pci_driver tsi721_driver = { + .name = "tsi721", + .id_table = tsi721_pci_tbl, + .probe = tsi721_probe, +}; + +static int __init tsi721_init(void) +{ + return pci_register_driver(&tsi721_driver); +} + +device_initcall(tsi721_init); + +MODULE_DESCRIPTION("IDT Tsi721 PCIExpress-to-SRIO bridge driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h new file mode 100644 index 000000000..9d2502543 --- /dev/null +++ b/drivers/rapidio/devices/tsi721.h @@ -0,0 +1,853 @@ +/* + * Tsi721 PCIExpress-to-SRIO bridge definitions + * + * Copyright 2011, Integrated Device Technology, Inc. + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., 59 + * Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
+ */ + +#ifndef __TSI721_H +#define __TSI721_H + +#define DRV_NAME "tsi721" + +#define DEFAULT_HOPCOUNT 0xff +#define DEFAULT_DESTID 0xff + +/* PCI device ID */ +#define PCI_DEVICE_ID_TSI721 0x80ab + +#define BAR_0 0 +#define BAR_1 1 +#define BAR_2 2 +#define BAR_4 4 + +#define TSI721_PC2SR_BARS 2 +#define TSI721_PC2SR_WINS 8 +#define TSI721_PC2SR_ZONES 8 +#define TSI721_MAINT_WIN 0 /* Window for outbound maintenance requests */ +#define IDB_QUEUE 0 /* Inbound Doorbell Queue to use */ +#define IDB_QSIZE 512 /* Inbound Doorbell Queue size */ + +/* Memory space sizes */ +#define TSI721_REG_SPACE_SIZE (512 * 1024) /* 512K */ +#define TSI721_DB_WIN_SIZE (16 * 1024 * 1024) /* 16MB */ + +#define RIO_TT_CODE_8 0x00000000 +#define RIO_TT_CODE_16 0x00000001 + +#define TSI721_DMA_MAXCH 8 +#define TSI721_DMA_MINSTSSZ 32 +#define TSI721_DMA_STSBLKSZ 8 + +#define TSI721_SRIO_MAXCH 8 + +#define DBELL_SID(buf) (((u8)buf[2] << 8) | (u8)buf[3]) +#define DBELL_TID(buf) (((u8)buf[4] << 8) | (u8)buf[5]) +#define DBELL_INF(buf) (((u8)buf[0] << 8) | (u8)buf[1]) + +#define TSI721_RIO_PW_MSG_SIZE 16 /* Tsi721 saves only 16 bytes of PW msg */ + +/* Register definitions */ + +/* + * Registers in PCIe configuration space + */ + +#define TSI721_PCIECFG_MSIXTBL 0x0a4 +#define TSI721_MSIXTBL_OFFSET 0x2c000 +#define TSI721_PCIECFG_MSIXPBA 0x0a8 +#define TSI721_MSIXPBA_OFFSET 0x2a000 +#define TSI721_PCIECFG_EPCTL 0x400 + +/* + * Event Management Registers + */ + +#define TSI721_RIO_EM_INT_STAT 0x10910 +#define TSI721_RIO_EM_INT_STAT_PW_RX 0x00010000 + +#define TSI721_RIO_EM_INT_ENABLE 0x10914 +#define TSI721_RIO_EM_INT_ENABLE_PW_RX 0x00010000 + +#define TSI721_RIO_EM_DEV_INT_EN 0x10930 +#define TSI721_RIO_EM_DEV_INT_EN_INT 0x00000001 + +/* + * Port-Write Block Registers + */ + +#define TSI721_RIO_PW_CTL 0x10a04 +#define TSI721_RIO_PW_CTL_PW_TIMER 0xf0000000 +#define TSI721_RIO_PW_CTL_PWT_DIS (0 << 28) +#define TSI721_RIO_PW_CTL_PWT_103 (1 << 28) +#define TSI721_RIO_PW_CTL_PWT_205 (1 << 29) +#define TSI721_RIO_PW_CTL_PWT_410 (1 << 30) +#define TSI721_RIO_PW_CTL_PWT_820 (1 << 31) +#define TSI721_RIO_PW_CTL_PWC_MODE 0x01000000 +#define TSI721_RIO_PW_CTL_PWC_CONT 0x00000000 +#define TSI721_RIO_PW_CTL_PWC_REL 0x01000000 + +#define TSI721_RIO_PW_RX_STAT 0x10a10 +#define TSI721_RIO_PW_RX_STAT_WR_SIZE 0x0000f000 +#define TSI_RIO_PW_RX_STAT_WDPTR 0x00000100 +#define TSI721_RIO_PW_RX_STAT_PW_SHORT 0x00000008 +#define TSI721_RIO_PW_RX_STAT_PW_TRUNC 0x00000004 +#define TSI721_RIO_PW_RX_STAT_PW_DISC 0x00000002 +#define TSI721_RIO_PW_RX_STAT_PW_VAL 0x00000001 + +#define TSI721_RIO_PW_RX_CAPT(x) (0x10a20 + (x)*4) + +/* + * Inbound Doorbells + */ + +#define TSI721_IDB_ENTRY_SIZE 64 + +#define TSI721_IDQ_CTL(x) (0x20000 + (x) * 0x1000) +#define TSI721_IDQ_SUSPEND 0x00000002 +#define TSI721_IDQ_INIT 0x00000001 + +#define TSI721_IDQ_STS(x) (0x20004 + (x) * 0x1000) +#define TSI721_IDQ_RUN 0x00200000 + +#define TSI721_IDQ_MASK(x) (0x20008 + (x) * 0x1000) +#define TSI721_IDQ_MASK_MASK 0xffff0000 +#define TSI721_IDQ_MASK_PATT 0x0000ffff + +#define TSI721_IDQ_RP(x) (0x2000c + (x) * 0x1000) +#define TSI721_IDQ_RP_PTR 0x0007ffff + +#define TSI721_IDQ_WP(x) (0x20010 + (x) * 0x1000) +#define TSI721_IDQ_WP_PTR 0x0007ffff + +#define TSI721_IDQ_BASEL(x) (0x20014 + (x) * 0x1000) +#define TSI721_IDQ_BASEL_ADDR 0xffffffc0 +#define TSI721_IDQ_BASEU(x) (0x20018 + (x) * 0x1000) +#define TSI721_IDQ_SIZE(x) (0x2001c + (x) * 0x1000) +#define TSI721_IDQ_SIZE_VAL(size) (__fls(size) - 4) +#define TSI721_IDQ_SIZE_MIN 512 +#define TSI721_IDQ_SIZE_MAX (512 * 1024) + 
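+/*
+ * Queue and window size fields use a log2 encoding, so only power-of-2
+ * sizes are representable, e.g.:
+ *   TSI721_IDQ_SIZE_VAL(512)        = __fls(512) - 4        = 9 - 4  = 5
+ *   TSI721_IDQ_SIZE_VAL(512 * 1024) = __fls(512 * 1024) - 4 = 19 - 4 = 15
+ * The same scheme with different offsets is used by TSI721_IBWIN_SIZE,
+ * TSI721_OBWIN_SIZE and TSI721_DMAC_DSSZ_SIZE below.
+ */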
+#define TSI721_SR_CHINT(x) (0x20040 + (x) * 0x1000) +#define TSI721_SR_CHINTE(x) (0x20044 + (x) * 0x1000) +#define TSI721_SR_CHINTSET(x) (0x20048 + (x) * 0x1000) +#define TSI721_SR_CHINT_ODBOK 0x00000020 +#define TSI721_SR_CHINT_IDBQRCV 0x00000010 +#define TSI721_SR_CHINT_SUSP 0x00000008 +#define TSI721_SR_CHINT_ODBTO 0x00000004 +#define TSI721_SR_CHINT_ODBRTRY 0x00000002 +#define TSI721_SR_CHINT_ODBERR 0x00000001 +#define TSI721_SR_CHINT_ALL 0x0000003f + +#define TSI721_IBWIN_NUM 8 + +#define TSI721_IBWIN_LB(x) (0x29000 + (x) * 0x20) +#define TSI721_IBWIN_LB_BA 0xfffff000 +#define TSI721_IBWIN_LB_WEN 0x00000001 + +#define TSI721_IBWIN_UB(x) (0x29004 + (x) * 0x20) +#define TSI721_IBWIN_SZ(x) (0x29008 + (x) * 0x20) +#define TSI721_IBWIN_SZ_SIZE 0x00001f00 +#define TSI721_IBWIN_SIZE(size) (__fls(size) - 12) + +#define TSI721_IBWIN_TLA(x) (0x2900c + (x) * 0x20) +#define TSI721_IBWIN_TLA_ADD 0xfffff000 +#define TSI721_IBWIN_TUA(x) (0x29010 + (x) * 0x20) + +#define TSI721_SR2PC_GEN_INTE 0x29800 +#define TSI721_SR2PC_PWE 0x29804 +#define TSI721_SR2PC_GEN_INT 0x29808 + +#define TSI721_DEV_INTE 0x29840 +#define TSI721_DEV_INT 0x29844 +#define TSI721_DEV_INTSET 0x29848 +#define TSI721_DEV_INT_BDMA_CH 0x00002000 +#define TSI721_DEV_INT_BDMA_NCH 0x00001000 +#define TSI721_DEV_INT_SMSG_CH 0x00000800 +#define TSI721_DEV_INT_SMSG_NCH 0x00000400 +#define TSI721_DEV_INT_SR2PC_CH 0x00000200 +#define TSI721_DEV_INT_SRIO 0x00000020 + +#define TSI721_DEV_CHAN_INTE 0x2984c +#define TSI721_DEV_CHAN_INT 0x29850 + +#define TSI721_INT_SR2PC_CHAN_M 0xff000000 +#define TSI721_INT_SR2PC_CHAN(x) (1 << (24 + (x))) +#define TSI721_INT_IMSG_CHAN_M 0x00ff0000 +#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x))) +#define TSI721_INT_OMSG_CHAN_M 0x0000ff00 +#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x))) +#define TSI721_INT_BDMA_CHAN_M 0x000000ff +#define TSI721_INT_BDMA_CHAN(x) (1 << (x)) + +/* + * PC2SR block registers + */ +#define TSI721_OBWIN_NUM TSI721_PC2SR_WINS + +#define TSI721_OBWINLB(x) (0x40000 + (x) * 0x20) +#define TSI721_OBWINLB_BA 0xffff8000 +#define TSI721_OBWINLB_WEN 0x00000001 + +#define TSI721_OBWINUB(x) (0x40004 + (x) * 0x20) + +#define TSI721_OBWINSZ(x) (0x40008 + (x) * 0x20) +#define TSI721_OBWINSZ_SIZE 0x00001f00 +#define TSI721_OBWIN_SIZE(size) (__fls(size) - 15) + +#define TSI721_ZONE_SEL 0x41300 +#define TSI721_ZONE_SEL_RD_WRB 0x00020000 +#define TSI721_ZONE_SEL_GO 0x00010000 +#define TSI721_ZONE_SEL_WIN 0x00000038 +#define TSI721_ZONE_SEL_ZONE 0x00000007 + +#define TSI721_LUT_DATA0 0x41304 +#define TSI721_LUT_DATA0_ADD 0xfffff000 +#define TSI721_LUT_DATA0_RDTYPE 0x00000f00 +#define TSI721_LUT_DATA0_NREAD 0x00000100 +#define TSI721_LUT_DATA0_MNTRD 0x00000200 +#define TSI721_LUT_DATA0_RDCRF 0x00000020 +#define TSI721_LUT_DATA0_WRCRF 0x00000010 +#define TSI721_LUT_DATA0_WRTYPE 0x0000000f +#define TSI721_LUT_DATA0_NWR 0x00000001 +#define TSI721_LUT_DATA0_MNTWR 0x00000002 +#define TSI721_LUT_DATA0_NWR_R 0x00000004 + +#define TSI721_LUT_DATA1 0x41308 + +#define TSI721_LUT_DATA2 0x4130c +#define TSI721_LUT_DATA2_HC 0xff000000 +#define TSI721_LUT_DATA2_ADD65 0x000c0000 +#define TSI721_LUT_DATA2_TT 0x00030000 +#define TSI721_LUT_DATA2_DSTID 0x0000ffff + +#define TSI721_PC2SR_INTE 0x41310 + +#define TSI721_DEVCTL 0x48004 +#define TSI721_DEVCTL_SRBOOT_CMPL 0x00000004 + +#define TSI721_I2C_INT_ENABLE 0x49120 + +/* + * Block DMA Engine Registers + * x = 0..7 + */ + +#define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000) + +#define TSI721_DMAC_DWRCNT 0x000 +#define TSI721_DMAC_DRDCNT 0x004 + +#define 
TSI721_DMAC_CTL 0x008 +#define TSI721_DMAC_CTL_SUSP 0x00000002 +#define TSI721_DMAC_CTL_INIT 0x00000001 + +#define TSI721_DMAC_INT 0x00c +#define TSI721_DMAC_INT_STFULL 0x00000010 +#define TSI721_DMAC_INT_DONE 0x00000008 +#define TSI721_DMAC_INT_SUSP 0x00000004 +#define TSI721_DMAC_INT_ERR 0x00000002 +#define TSI721_DMAC_INT_IOFDONE 0x00000001 +#define TSI721_DMAC_INT_ALL 0x0000001f + +#define TSI721_DMAC_INTSET 0x010 + +#define TSI721_DMAC_STS 0x014 +#define TSI721_DMAC_STS_ABORT 0x00400000 +#define TSI721_DMAC_STS_RUN 0x00200000 +#define TSI721_DMAC_STS_CS 0x001f0000 + +#define TSI721_DMAC_INTE 0x018 + +#define TSI721_DMAC_DPTRL 0x024 +#define TSI721_DMAC_DPTRL_MASK 0xffffffe0 + +#define TSI721_DMAC_DPTRH 0x028 + +#define TSI721_DMAC_DSBL 0x02c +#define TSI721_DMAC_DSBL_MASK 0xffffffc0 + +#define TSI721_DMAC_DSBH 0x030 + +#define TSI721_DMAC_DSSZ 0x034 +#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f +#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4) + +#define TSI721_DMAC_DSRP 0x038 +#define TSI721_DMAC_DSRP_MASK 0x0007ffff + +#define TSI721_DMAC_DSWP 0x03c +#define TSI721_DMAC_DSWP_MASK 0x0007ffff + +#define TSI721_BDMA_INTE 0x5f000 + +/* + * Messaging definitions + */ +#define TSI721_MSG_BUFFER_SIZE RIO_MAX_MSG_SIZE +#define TSI721_MSG_MAX_SIZE RIO_MAX_MSG_SIZE +#define TSI721_IMSG_MAXCH 8 +#define TSI721_IMSG_CHNUM TSI721_IMSG_MAXCH +#define TSI721_IMSGD_MIN_RING_SIZE 32 +#define TSI721_IMSGD_RING_SIZE 512 + +#define TSI721_OMSG_CHNUM 4 /* One channel per MBOX */ +#define TSI721_OMSGD_MIN_RING_SIZE 32 +#define TSI721_OMSGD_RING_SIZE 512 + +/* + * Outbound Messaging Engine Registers + * x = 0..7 + */ + +#define TSI721_OBDMAC_DWRCNT(x) (0x61000 + (x) * 0x1000) + +#define TSI721_OBDMAC_DRDCNT(x) (0x61004 + (x) * 0x1000) + +#define TSI721_OBDMAC_CTL(x) (0x61008 + (x) * 0x1000) +#define TSI721_OBDMAC_CTL_MASK 0x00000007 +#define TSI721_OBDMAC_CTL_RETRY_THR 0x00000004 +#define TSI721_OBDMAC_CTL_SUSPEND 0x00000002 +#define TSI721_OBDMAC_CTL_INIT 0x00000001 + +#define TSI721_OBDMAC_INT(x) (0x6100c + (x) * 0x1000) +#define TSI721_OBDMAC_INTSET(x) (0x61010 + (x) * 0x1000) +#define TSI721_OBDMAC_INTE(x) (0x61018 + (x) * 0x1000) +#define TSI721_OBDMAC_INT_MASK 0x0000001F +#define TSI721_OBDMAC_INT_ST_FULL 0x00000010 +#define TSI721_OBDMAC_INT_DONE 0x00000008 +#define TSI721_OBDMAC_INT_SUSPENDED 0x00000004 +#define TSI721_OBDMAC_INT_ERROR 0x00000002 +#define TSI721_OBDMAC_INT_IOF_DONE 0x00000001 +#define TSI721_OBDMAC_INT_ALL TSI721_OBDMAC_INT_MASK + +#define TSI721_OBDMAC_STS(x) (0x61014 + (x) * 0x1000) +#define TSI721_OBDMAC_STS_MASK 0x007f0000 +#define TSI721_OBDMAC_STS_ABORT 0x00400000 +#define TSI721_OBDMAC_STS_RUN 0x00200000 +#define TSI721_OBDMAC_STS_CS 0x001f0000 + +#define TSI721_OBDMAC_PWE(x) (0x6101c + (x) * 0x1000) +#define TSI721_OBDMAC_PWE_MASK 0x00000002 +#define TSI721_OBDMAC_PWE_ERROR_EN 0x00000002 + +#define TSI721_OBDMAC_DPTRL(x) (0x61020 + (x) * 0x1000) +#define TSI721_OBDMAC_DPTRL_MASK 0xfffffff0 + +#define TSI721_OBDMAC_DPTRH(x) (0x61024 + (x) * 0x1000) +#define TSI721_OBDMAC_DPTRH_MASK 0xffffffff + +#define TSI721_OBDMAC_DSBL(x) (0x61040 + (x) * 0x1000) +#define TSI721_OBDMAC_DSBL_MASK 0xffffffc0 + +#define TSI721_OBDMAC_DSBH(x) (0x61044 + (x) * 0x1000) +#define TSI721_OBDMAC_DSBH_MASK 0xffffffff + +#define TSI721_OBDMAC_DSSZ(x) (0x61048 + (x) * 0x1000) +#define TSI721_OBDMAC_DSSZ_MASK 0x0000000f + +#define TSI721_OBDMAC_DSRP(x) (0x6104c + (x) * 0x1000) +#define TSI721_OBDMAC_DSRP_MASK 0x0007ffff + +#define TSI721_OBDMAC_DSWP(x) (0x61050 + (x) * 0x1000) +#define 
TSI721_OBDMAC_DSWP_MASK 0x0007ffff + +#define TSI721_RQRPTO 0x60010 +#define TSI721_RQRPTO_MASK 0x00ffffff +#define TSI721_RQRPTO_VAL 400 /* Response TO value */ + +/* + * Inbound Messaging Engine Registers + * x = 0..7 + */ + +#define TSI721_IB_DEVID_GLOBAL 0xffff +#define TSI721_IBDMAC_FQBL(x) (0x61200 + (x) * 0x1000) +#define TSI721_IBDMAC_FQBL_MASK 0xffffffc0 + +#define TSI721_IBDMAC_FQBH(x) (0x61204 + (x) * 0x1000) +#define TSI721_IBDMAC_FQBH_MASK 0xffffffff + +#define TSI721_IBDMAC_FQSZ_ENTRY_INX TSI721_IMSGD_RING_SIZE +#define TSI721_IBDMAC_FQSZ(x) (0x61208 + (x) * 0x1000) +#define TSI721_IBDMAC_FQSZ_MASK 0x0000000f + +#define TSI721_IBDMAC_FQRP(x) (0x6120c + (x) * 0x1000) +#define TSI721_IBDMAC_FQRP_MASK 0x0007ffff + +#define TSI721_IBDMAC_FQWP(x) (0x61210 + (x) * 0x1000) +#define TSI721_IBDMAC_FQWP_MASK 0x0007ffff + +#define TSI721_IBDMAC_FQTH(x) (0x61214 + (x) * 0x1000) +#define TSI721_IBDMAC_FQTH_MASK 0x0007ffff + +#define TSI721_IB_DEVID 0x60020 +#define TSI721_IB_DEVID_MASK 0x0000ffff + +#define TSI721_IBDMAC_CTL(x) (0x61240 + (x) * 0x1000) +#define TSI721_IBDMAC_CTL_MASK 0x00000003 +#define TSI721_IBDMAC_CTL_SUSPEND 0x00000002 +#define TSI721_IBDMAC_CTL_INIT 0x00000001 + +#define TSI721_IBDMAC_STS(x) (0x61244 + (x) * 0x1000) +#define TSI721_IBDMAC_STS_MASK 0x007f0000 +#define TSI721_IBSMAC_STS_ABORT 0x00400000 +#define TSI721_IBSMAC_STS_RUN 0x00200000 +#define TSI721_IBSMAC_STS_CS 0x001f0000 + +#define TSI721_IBDMAC_INT(x) (0x61248 + (x) * 0x1000) +#define TSI721_IBDMAC_INTSET(x) (0x6124c + (x) * 0x1000) +#define TSI721_IBDMAC_INTE(x) (0x61250 + (x) * 0x1000) +#define TSI721_IBDMAC_INT_MASK 0x0000100f +#define TSI721_IBDMAC_INT_SRTO 0x00001000 +#define TSI721_IBDMAC_INT_SUSPENDED 0x00000008 +#define TSI721_IBDMAC_INT_PC_ERROR 0x00000004 +#define TSI721_IBDMAC_INT_FQ_LOW 0x00000002 +#define TSI721_IBDMAC_INT_DQ_RCV 0x00000001 +#define TSI721_IBDMAC_INT_ALL TSI721_IBDMAC_INT_MASK + +#define TSI721_IBDMAC_PWE(x) (0x61254 + (x) * 0x1000) +#define TSI721_IBDMAC_PWE_MASK 0x00001700 +#define TSI721_IBDMAC_PWE_SRTO 0x00001000 +#define TSI721_IBDMAC_PWE_ILL_FMT 0x00000400 +#define TSI721_IBDMAC_PWE_ILL_DEC 0x00000200 +#define TSI721_IBDMAC_PWE_IMP_SP 0x00000100 + +#define TSI721_IBDMAC_DQBL(x) (0x61300 + (x) * 0x1000) +#define TSI721_IBDMAC_DQBL_MASK 0xffffffc0 +#define TSI721_IBDMAC_DQBL_ADDR 0xffffffc0 + +#define TSI721_IBDMAC_DQBH(x) (0x61304 + (x) * 0x1000) +#define TSI721_IBDMAC_DQBH_MASK 0xffffffff + +#define TSI721_IBDMAC_DQRP(x) (0x61308 + (x) * 0x1000) +#define TSI721_IBDMAC_DQRP_MASK 0x0007ffff + +#define TSI721_IBDMAC_DQWR(x) (0x6130c + (x) * 0x1000) +#define TSI721_IBDMAC_DQWR_MASK 0x0007ffff + +#define TSI721_IBDMAC_DQSZ(x) (0x61314 + (x) * 0x1000) +#define TSI721_IBDMAC_DQSZ_MASK 0x0000000f + +/* + * Messaging Engine Interrupts + */ + +#define TSI721_SMSG_PWE 0x6a004 + +#define TSI721_SMSG_INTE 0x6a000 +#define TSI721_SMSG_INT 0x6a008 +#define TSI721_SMSG_INTSET 0x6a010 +#define TSI721_SMSG_INT_MASK 0x0086ffff +#define TSI721_SMSG_INT_UNS_RSP 0x00800000 +#define TSI721_SMSG_INT_ECC_NCOR 0x00040000 +#define TSI721_SMSG_INT_ECC_COR 0x00020000 +#define TSI721_SMSG_INT_ECC_NCOR_CH 0x0000ff00 +#define TSI721_SMSG_INT_ECC_COR_CH 0x000000ff + +#define TSI721_SMSG_ECC_LOG 0x6a014 +#define TSI721_SMSG_ECC_LOG_MASK 0x00070007 +#define TSI721_SMSG_ECC_LOG_ECC_NCOR_M 0x00070000 +#define TSI721_SMSG_ECC_LOG_ECC_COR_M 0x00000007 + +#define TSI721_RETRY_GEN_CNT 0x6a100 +#define TSI721_RETRY_GEN_CNT_MASK 0xffffffff + +#define TSI721_RETRY_RX_CNT 0x6a104 +#define TSI721_RETRY_RX_CNT_MASK 
0xffffffff + +#define TSI721_SMSG_ECC_COR_LOG(x) (0x6a300 + (x) * 4) +#define TSI721_SMSG_ECC_COR_LOG_MASK 0x000000ff + +#define TSI721_SMSG_ECC_NCOR(x) (0x6a340 + (x) * 4) +#define TSI721_SMSG_ECC_NCOR_MASK 0x000000ff + +/* + * Block DMA Descriptors + */ + +struct tsi721_dma_desc { + __le32 type_id; + +#define TSI721_DMAD_DEVID 0x0000ffff +#define TSI721_DMAD_CRF 0x00010000 +#define TSI721_DMAD_PRIO 0x00060000 +#define TSI721_DMAD_RTYPE 0x00780000 +#define TSI721_DMAD_IOF 0x08000000 +#define TSI721_DMAD_DTYPE 0xe0000000 + + __le32 bcount; + +#define TSI721_DMAD_BCOUNT1 0x03ffffff /* if DTYPE == 1 */ +#define TSI721_DMAD_BCOUNT2 0x0000000f /* if DTYPE == 2 */ +#define TSI721_DMAD_TT 0x0c000000 +#define TSI721_DMAD_RADDR0 0xc0000000 + + union { + __le32 raddr_lo; /* if DTYPE == (1 || 2) */ + __le32 next_lo; /* if DTYPE == 3 */ + }; + +#define TSI721_DMAD_CFGOFF 0x00ffffff +#define TSI721_DMAD_HOPCNT 0xff000000 + + union { + __le32 raddr_hi; /* if DTYPE == (1 || 2) */ + __le32 next_hi; /* if DTYPE == 3 */ + }; + + union { + struct { /* if DTYPE == 1 */ + __le32 bufptr_lo; + __le32 bufptr_hi; + __le32 s_dist; + __le32 s_size; + } t1; + __le32 data[4]; /* if DTYPE == 2 */ + u32 reserved[4]; /* if DTYPE == 3 */ + }; +} __aligned(32); + +/* + * Inbound Messaging Descriptor + */ +struct tsi721_imsg_desc { + __le32 type_id; + +#define TSI721_IMD_DEVID 0x0000ffff +#define TSI721_IMD_CRF 0x00010000 +#define TSI721_IMD_PRIO 0x00060000 +#define TSI721_IMD_TT 0x00180000 +#define TSI721_IMD_DTYPE 0xe0000000 + + __le32 msg_info; + +#define TSI721_IMD_BCOUNT 0x00000ff8 +#define TSI721_IMD_SSIZE 0x0000f000 +#define TSI721_IMD_LETER 0x00030000 +#define TSI721_IMD_XMBOX 0x003c0000 +#define TSI721_IMD_MBOX 0x00c00000 +#define TSI721_IMD_CS 0x78000000 +#define TSI721_IMD_HO 0x80000000 + + __le32 bufptr_lo; + __le32 bufptr_hi; + u32 reserved[12]; + +} __aligned(64); + +/* + * Outbound Messaging Descriptor + */ +struct tsi721_omsg_desc { + __le32 type_id; + +#define TSI721_OMD_DEVID 0x0000ffff +#define TSI721_OMD_CRF 0x00010000 +#define TSI721_OMD_PRIO 0x00060000 +#define TSI721_OMD_IOF 0x08000000 +#define TSI721_OMD_DTYPE 0xe0000000 +#define TSI721_OMD_RSRVD 0x17f80000 + + __le32 msg_info; + +#define TSI721_OMD_BCOUNT 0x00000ff8 +#define TSI721_OMD_SSIZE 0x0000f000 +#define TSI721_OMD_LETER 0x00030000 +#define TSI721_OMD_XMBOX 0x003c0000 +#define TSI721_OMD_MBOX 0x00c00000 +#define TSI721_OMD_TT 0x0c000000 + + union { + __le32 bufptr_lo; /* if DTYPE == 4 */ + __le32 next_lo; /* if DTYPE == 5 */ + }; + + union { + __le32 bufptr_hi; /* if DTYPE == 4 */ + __le32 next_hi; /* if DTYPE == 5 */ + }; + +} __aligned(16); + +struct tsi721_dma_sts { + __le64 desc_sts[8]; +} __aligned(64); + +struct tsi721_desc_sts_fifo { + union { + __le64 da64; + struct { + __le32 lo; + __le32 hi; + } da32; + } stat[8]; +} __aligned(64); + +/* Descriptor types for BDMA and Messaging blocks */ +enum dma_dtype { + DTYPE1 = 1, /* Data Transfer DMA Descriptor */ + DTYPE2 = 2, /* Immediate Data Transfer DMA Descriptor */ + DTYPE3 = 3, /* Block Pointer DMA Descriptor */ + DTYPE4 = 4, /* Outbound Msg DMA Descriptor */ + DTYPE5 = 5, /* OB Messaging Block Pointer Descriptor */ + DTYPE6 = 6 /* Inbound Messaging Descriptor */ +}; + +enum dma_rtype { + NREAD = 0, + LAST_NWRITE_R = 1, + ALL_NWRITE = 2, + ALL_NWRITE_R = 3, + MAINT_RD = 4, + MAINT_WR = 5 +}; + +/* + * mport Driver Definitions + */ +#define TSI721_DMA_CHNUM TSI721_DMA_MAXCH + +#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */ +#define TSI721_DMACH_MAINT_NBD 32 /* Number 
of BDs for maint requests */ + +#define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */ + +#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0) + +enum tsi721_smsg_int_flag { + SMSG_INT_NONE = 0x00000000, + SMSG_INT_ECC_COR_CH = 0x000000ff, + SMSG_INT_ECC_NCOR_CH = 0x0000ff00, + SMSG_INT_ECC_COR = 0x00020000, + SMSG_INT_ECC_NCOR = 0x00040000, + SMSG_INT_UNS_RSP = 0x00800000, + SMSG_INT_ALL = 0x0006ffff +}; + +/* Structures */ + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + +#define TSI721_BDMA_MAX_BCOUNT (TSI721_DMAD_BCOUNT1 + 1) + +struct tsi721_tx_desc { + struct dma_async_tx_descriptor txd; + u16 destid; + /* low 64-bits of 66-bit RIO address */ + u64 rio_addr; + /* upper 2-bits of 66-bit RIO address */ + u8 rio_addr_u; + enum dma_rtype rtype; + struct list_head desc_node; + struct scatterlist *sg; + unsigned int sg_len; + enum dma_status status; +}; + +struct tsi721_bdma_chan { + int id; + void __iomem *regs; + int bd_num; /* number of HW buffer descriptors */ + void *bd_base; /* start of DMA descriptors */ + dma_addr_t bd_phys; + void *sts_base; /* start of DMA BD status FIFO */ + dma_addr_t sts_phys; + int sts_size; + u32 sts_rdptr; + u32 wr_count; + u32 wr_count_next; + + struct dma_chan dchan; + struct tsi721_tx_desc *tx_desc; + spinlock_t lock; + struct list_head active_list; + struct list_head queue; + struct list_head free_list; + struct tasklet_struct tasklet; + bool active; +}; + +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + +struct tsi721_bdma_maint { + int ch_id; /* BDMA channel number */ + int bd_num; /* number of buffer descriptors */ + void *bd_base; /* start of DMA descriptors */ + dma_addr_t bd_phys; + void *sts_base; /* start of DMA BD status FIFO */ + dma_addr_t sts_phys; + int sts_size; +}; + +struct tsi721_imsg_ring { + u32 size; + /* VA/PA of data buffers for incoming messages */ + void *buf_base; + dma_addr_t buf_phys; + /* VA/PA of circular free buffer list */ + void *imfq_base; + dma_addr_t imfq_phys; + /* VA/PA of Inbound message descriptors */ + void *imd_base; + dma_addr_t imd_phys; + /* Inbound Queue buffer pointers */ + void *imq_base[TSI721_IMSGD_RING_SIZE]; + + u32 rx_slot; + void *dev_id; + u32 fq_wrptr; + u32 desc_rdptr; + spinlock_t lock; +}; + +struct tsi721_omsg_ring { + u32 size; + /* VA/PA of OB Msg descriptors */ + void *omd_base; + dma_addr_t omd_phys; + /* VA/PA of OB Msg data buffers */ + void *omq_base[TSI721_OMSGD_RING_SIZE]; + dma_addr_t omq_phys[TSI721_OMSGD_RING_SIZE]; + /* VA/PA of OB Msg descriptor status FIFO */ + void *sts_base; + dma_addr_t sts_phys; + u32 sts_size; /* # of allocated status entries */ + u32 sts_rdptr; + + u32 tx_slot; + void *dev_id; + u32 wr_count; + spinlock_t lock; +}; + +enum tsi721_flags { + TSI721_USING_MSI = (1 << 0), + TSI721_USING_MSIX = (1 << 1), + TSI721_IMSGID_SET = (1 << 2), +}; + +#ifdef CONFIG_PCI_MSI +/* + * MSI-X Table Entries (0 ... 
69) + */ +#define TSI721_MSIX_DMACH_DONE(x) (0 + (x)) +#define TSI721_MSIX_DMACH_INT(x) (8 + (x)) +#define TSI721_MSIX_BDMA_INT 16 +#define TSI721_MSIX_OMSG_DONE(x) (17 + (x)) +#define TSI721_MSIX_OMSG_INT(x) (25 + (x)) +#define TSI721_MSIX_IMSG_DQ_RCV(x) (33 + (x)) +#define TSI721_MSIX_IMSG_INT(x) (41 + (x)) +#define TSI721_MSIX_MSG_INT 49 +#define TSI721_MSIX_SR2PC_IDBQ_RCV(x) (50 + (x)) +#define TSI721_MSIX_SR2PC_CH_INT(x) (58 + (x)) +#define TSI721_MSIX_SR2PC_INT 66 +#define TSI721_MSIX_PC2SR_INT 67 +#define TSI721_MSIX_SRIO_MAC_INT 68 +#define TSI721_MSIX_I2C_INT 69 + +/* MSI-X vector and init table entry indexes */ +enum tsi721_msix_vect { + TSI721_VECT_IDB, + TSI721_VECT_PWRX, /* PW_RX is part of SRIO MAC Interrupt reporting */ + TSI721_VECT_OMB0_DONE, + TSI721_VECT_OMB1_DONE, + TSI721_VECT_OMB2_DONE, + TSI721_VECT_OMB3_DONE, + TSI721_VECT_OMB0_INT, + TSI721_VECT_OMB1_INT, + TSI721_VECT_OMB2_INT, + TSI721_VECT_OMB3_INT, + TSI721_VECT_IMB0_RCV, + TSI721_VECT_IMB1_RCV, + TSI721_VECT_IMB2_RCV, + TSI721_VECT_IMB3_RCV, + TSI721_VECT_IMB0_INT, + TSI721_VECT_IMB1_INT, + TSI721_VECT_IMB2_INT, + TSI721_VECT_IMB3_INT, +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + TSI721_VECT_DMA0_DONE, + TSI721_VECT_DMA1_DONE, + TSI721_VECT_DMA2_DONE, + TSI721_VECT_DMA3_DONE, + TSI721_VECT_DMA4_DONE, + TSI721_VECT_DMA5_DONE, + TSI721_VECT_DMA6_DONE, + TSI721_VECT_DMA7_DONE, + TSI721_VECT_DMA0_INT, + TSI721_VECT_DMA1_INT, + TSI721_VECT_DMA2_INT, + TSI721_VECT_DMA3_INT, + TSI721_VECT_DMA4_INT, + TSI721_VECT_DMA5_INT, + TSI721_VECT_DMA6_INT, + TSI721_VECT_DMA7_INT, +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + TSI721_VECT_MAX +}; + +#define IRQ_DEVICE_NAME_MAX 64 + +struct msix_irq { + u16 vector; + char irq_name[IRQ_DEVICE_NAME_MAX]; +}; +#endif /* CONFIG_PCI_MSI */ + +struct tsi721_device { + struct pci_dev *pdev; + struct rio_mport *mport; + u32 flags; + void __iomem *regs; +#ifdef CONFIG_PCI_MSI + struct msix_irq msix[TSI721_VECT_MAX]; +#endif + /* Doorbells */ + void __iomem *odb_base; + void *idb_base; + dma_addr_t idb_dma; + struct work_struct idb_work; + u32 db_discard_count; + + /* Inbound Port-Write */ + struct work_struct pw_work; + struct kfifo pw_fifo; + spinlock_t pw_fifo_lock; + u32 pw_discard_count; + + /* BDMA Engine */ + struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */ + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM]; +#endif + + /* Inbound Messaging */ + int imsg_init[TSI721_IMSG_CHNUM]; + struct tsi721_imsg_ring imsg_ring[TSI721_IMSG_CHNUM]; + + /* Outbound Messaging */ + int omsg_init[TSI721_OMSG_CHNUM]; + struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM]; +}; + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE +extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan); +extern int tsi721_register_dma(struct tsi721_device *priv); +#endif + +#endif diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c new file mode 100644 index 000000000..47295940a --- /dev/null +++ b/drivers/rapidio/devices/tsi721_dma.c @@ -0,0 +1,907 @@ +/* + * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge + * + * Copyright (c) 2011-2014 Integrated Device Technology, Inc. + * Alexandre Bounine + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the Free + * Software Foundation; either version 2 of the License, or (at your option) + * any later version. 
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * The full GNU General Public License is included in this distribution in the
+ * file called COPYING.
+ */
+
+#include <linux/io.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/kfifo.h>
+#include <linux/delay.h>
+#include "../../dma/dmaengine.h"
+
+#include "tsi721.h"
+
+#define TSI721_DMA_TX_QUEUE_SZ 16 /* number of transaction descriptors */
+
+#ifdef CONFIG_PCI_MSI
+static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
+#endif
+static int tsi721_submit_sg(struct tsi721_tx_desc *desc);
+
+static unsigned int dma_desc_per_channel = 128;
+module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(dma_desc_per_channel,
+ "Number of DMA descriptors per channel (default: 128)");
+
+static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
+{
+ return container_of(chan, struct tsi721_bdma_chan, dchan);
+}
+
+static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
+{
+ return container_of(ddev, struct rio_mport, dma)->priv;
+}
+
+static inline
+struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
+{
+ return container_of(txd, struct tsi721_tx_desc, txd);
+}
+
+static inline
+struct tsi721_tx_desc *tsi721_dma_first_active(
+ struct tsi721_bdma_chan *bdma_chan)
+{
+ return list_first_entry(&bdma_chan->active_list,
+ struct tsi721_tx_desc, desc_node);
+}
+
+static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
+{
+ struct tsi721_dma_desc *bd_ptr;
+ struct device *dev = bdma_chan->dchan.device->dev;
+ u64 *sts_ptr;
+ dma_addr_t bd_phys;
+ dma_addr_t sts_phys;
+ int sts_size;
+#ifdef CONFIG_PCI_MSI
+ struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
+#endif
+
+ dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
+
+ /*
+ * Allocate space for DMA descriptors
+ * (add an extra element for link descriptor)
+ */
+ bd_ptr = dma_zalloc_coherent(dev,
+ (bd_num + 1) * sizeof(struct tsi721_dma_desc),
+ &bd_phys, GFP_KERNEL);
+ if (!bd_ptr)
+ return -ENOMEM;
+
+ bdma_chan->bd_num = bd_num;
+ bdma_chan->bd_phys = bd_phys;
+ bdma_chan->bd_base = bd_ptr;
+
+ dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
+ bd_ptr, (unsigned long long)bd_phys);
+
+ /* Allocate space for descriptor status FIFO */
+ sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
+ (bd_num + 1) : TSI721_DMA_MINSTSSZ; + sts_size = roundup_pow_of_two(sts_size); + sts_ptr = dma_zalloc_coherent(dev, + sts_size * sizeof(struct tsi721_dma_sts), + &sts_phys, GFP_KERNEL); + if (!sts_ptr) { + /* Free space allocated for DMA descriptors */ + dma_free_coherent(dev, + (bd_num + 1) * sizeof(struct tsi721_dma_desc), + bd_ptr, bd_phys); + bdma_chan->bd_base = NULL; + return -ENOMEM; + } + + bdma_chan->sts_phys = sts_phys; + bdma_chan->sts_base = sts_ptr; + bdma_chan->sts_size = sts_size; + + dev_dbg(dev, + "desc status FIFO @ %p (phys = %llx) size=0x%x\n", + sts_ptr, (unsigned long long)sts_phys, sts_size); + + /* Initialize DMA descriptors ring using added link descriptor */ + bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29); + bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys & + TSI721_DMAC_DPTRL_MASK); + bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32); + + /* Setup DMA descriptor pointers */ + iowrite32(((u64)bd_phys >> 32), + bdma_chan->regs + TSI721_DMAC_DPTRH); + iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), + bdma_chan->regs + TSI721_DMAC_DPTRL); + + /* Setup descriptor status FIFO */ + iowrite32(((u64)sts_phys >> 32), + bdma_chan->regs + TSI721_DMAC_DSBH); + iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), + bdma_chan->regs + TSI721_DMAC_DSBL); + iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), + bdma_chan->regs + TSI721_DMAC_DSSZ); + + /* Clear interrupt bits */ + iowrite32(TSI721_DMAC_INT_ALL, + bdma_chan->regs + TSI721_DMAC_INT); + + ioread32(bdma_chan->regs + TSI721_DMAC_INT); + +#ifdef CONFIG_PCI_MSI + /* Request interrupt service if we are in MSI-X mode */ + if (priv->flags & TSI721_USING_MSIX) { + int rc, idx; + + idx = TSI721_VECT_DMA0_DONE + bdma_chan->id; + + rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0, + priv->msix[idx].irq_name, (void *)bdma_chan); + + if (rc) { + dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n", + bdma_chan->id); + goto err_out; + } + + idx = TSI721_VECT_DMA0_INT + bdma_chan->id; + + rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0, + priv->msix[idx].irq_name, (void *)bdma_chan); + + if (rc) { + dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n", + bdma_chan->id); + free_irq( + priv->msix[TSI721_VECT_DMA0_DONE + + bdma_chan->id].vector, + (void *)bdma_chan); + } + +err_out: + if (rc) { + /* Free space allocated for DMA descriptors */ + dma_free_coherent(dev, + (bd_num + 1) * sizeof(struct tsi721_dma_desc), + bd_ptr, bd_phys); + bdma_chan->bd_base = NULL; + + /* Free space allocated for status descriptors */ + dma_free_coherent(dev, + sts_size * sizeof(struct tsi721_dma_sts), + sts_ptr, sts_phys); + bdma_chan->sts_base = NULL; + + return -EIO; + } + } +#endif /* CONFIG_PCI_MSI */ + + /* Toggle DMA channel initialization */ + iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); + ioread32(bdma_chan->regs + TSI721_DMAC_CTL); + bdma_chan->wr_count = bdma_chan->wr_count_next = 0; + bdma_chan->sts_rdptr = 0; + udelay(10); + + return 0; +} + +static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan) +{ + u32 ch_stat; +#ifdef CONFIG_PCI_MSI + struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); +#endif + + if (bdma_chan->bd_base == NULL) + return 0; + + /* Check if DMA channel still running */ + ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS); + if (ch_stat & TSI721_DMAC_STS_RUN) + return -EFAULT; + + /* Put DMA channel into init state */ + iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL); + +#ifdef CONFIG_PCI_MSI + if (priv->flags & 
TSI721_USING_MSIX) {
+ free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ free_irq(priv->msix[TSI721_VECT_DMA0_INT +
+ bdma_chan->id].vector, (void *)bdma_chan);
+ }
+#endif /* CONFIG_PCI_MSI */
+
+ /* Free space allocated for DMA descriptors */
+ dma_free_coherent(bdma_chan->dchan.device->dev,
+ (bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
+ bdma_chan->bd_base, bdma_chan->bd_phys);
+ bdma_chan->bd_base = NULL;
+
+ /* Free space allocated for status FIFO */
+ dma_free_coherent(bdma_chan->dchan.device->dev,
+ bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
+ bdma_chan->sts_base, bdma_chan->sts_phys);
+ bdma_chan->sts_base = NULL;
+ return 0;
+}
+
+static void
+tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
+{
+ if (enable) {
+ /* Clear pending BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+ ioread32(bdma_chan->regs + TSI721_DMAC_INT);
+ /* Enable BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INTE);
+ } else {
+ /* Disable BDMA channel interrupts */
+ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+ /* Clear pending BDMA channel interrupts */
+ iowrite32(TSI721_DMAC_INT_ALL,
+ bdma_chan->regs + TSI721_DMAC_INT);
+ }
+}
+
+static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
+{
+ u32 sts;
+
+ sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
+ return ((sts & TSI721_DMAC_STS_RUN) == 0);
+}
+
+void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
+{
+ /* Disable BDMA channel interrupts */
+ iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
+ if (bdma_chan->active)
+ tasklet_schedule(&bdma_chan->tasklet);
+}
+
+#ifdef CONFIG_PCI_MSI
+/**
+ * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
+ * @irq: Linux interrupt number
+ * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
+ *
+ * Handles BDMA channel interrupts signaled using MSI-X.
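+ * Delegates servicing to tsi721_bdma_handler(), which masks the channel
+ * interrupts and schedules the channel tasklet.
+ *
+ * Returns IRQ_HANDLED unconditionally.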
+ */ +static irqreturn_t tsi721_bdma_msix(int irq, void *ptr) +{ + struct tsi721_bdma_chan *bdma_chan = ptr; + + tsi721_bdma_handler(bdma_chan); + return IRQ_HANDLED; +} +#endif /* CONFIG_PCI_MSI */ + +/* Must be called with the spinlock held */ +static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan) +{ + if (!tsi721_dma_is_idle(bdma_chan)) { + dev_err(bdma_chan->dchan.device->dev, + "BUG: Attempt to start non-idle channel\n"); + return; + } + + if (bdma_chan->wr_count == bdma_chan->wr_count_next) { + dev_err(bdma_chan->dchan.device->dev, + "BUG: Attempt to start DMA with no BDs ready\n"); + return; + } + + dev_dbg(bdma_chan->dchan.device->dev, + "%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id, + bdma_chan->wr_count_next); + + iowrite32(bdma_chan->wr_count_next, + bdma_chan->regs + TSI721_DMAC_DWRCNT); + ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT); + + bdma_chan->wr_count = bdma_chan->wr_count_next; +} + +static int +tsi721_desc_fill_init(struct tsi721_tx_desc *desc, + struct tsi721_dma_desc *bd_ptr, + struct scatterlist *sg, u32 sys_size) +{ + u64 rio_addr; + + if (bd_ptr == NULL) + return -EINVAL; + + /* Initialize DMA descriptor */ + bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) | + (desc->rtype << 19) | desc->destid); + bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) | + (sys_size << 26)); + rio_addr = (desc->rio_addr >> 2) | + ((u64)(desc->rio_addr_u & 0x3) << 62); + bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff); + bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32); + bd_ptr->t1.bufptr_lo = cpu_to_le32( + (u64)sg_dma_address(sg) & 0xffffffff); + bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32); + bd_ptr->t1.s_dist = 0; + bd_ptr->t1.s_size = 0; + + return 0; +} + +static int +tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt) +{ + if (bd_ptr == NULL) + return -EINVAL; + + /* Update DMA descriptor */ + if (interrupt) + bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF); + bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1); + + return 0; +} + +static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan, + struct tsi721_tx_desc *desc) +{ + struct dma_async_tx_descriptor *txd = &desc->txd; + dma_async_tx_callback callback = txd->callback; + void *param = txd->callback_param; + + list_move(&desc->desc_node, &bdma_chan->free_list); + + if (callback) + callback(param); +} + +static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan) +{ + u32 srd_ptr; + u64 *sts_ptr; + int i, j; + + /* Check and clear descriptor status FIFO entries */ + srd_ptr = bdma_chan->sts_rdptr; + sts_ptr = bdma_chan->sts_base; + j = srd_ptr * 8; + while (sts_ptr[j]) { + for (i = 0; i < 8 && sts_ptr[j]; i++, j++) + sts_ptr[j] = 0; + + ++srd_ptr; + srd_ptr %= bdma_chan->sts_size; + j = srd_ptr * 8; + } + + iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP); + bdma_chan->sts_rdptr = srd_ptr; +} + +/* Must be called with the channel spinlock held */ +static int tsi721_submit_sg(struct tsi721_tx_desc *desc) +{ + struct dma_chan *dchan = desc->txd.chan; + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + u32 sys_size; + u64 rio_addr; + dma_addr_t next_addr; + u32 bcount; + struct scatterlist *sg; + unsigned int i; + int err = 0; + struct tsi721_dma_desc *bd_ptr = NULL; + u32 idx, rd_idx; + u32 add_count = 0; + + if (!tsi721_dma_is_idle(bdma_chan)) { + dev_err(bdma_chan->dchan.device->dev, + "BUG: Attempt to use non-idle channel\n"); + return -EIO; + } + + /* + * Fill DMA channel's hardware buffer descriptors. 
+ * (NOTE: RapidIO destination address is limited to 64 bits for now) + */ + rio_addr = desc->rio_addr; + next_addr = -1; + bcount = 0; + sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size; + + rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT); + rd_idx %= (bdma_chan->bd_num + 1); + + idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1); + if (idx == bdma_chan->bd_num) { + /* wrap around link descriptor */ + idx = 0; + add_count++; + } + + dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n", + __func__, rd_idx, idx); + + for_each_sg(desc->sg, sg, desc->sg_len, i) { + + dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n", + i, desc->sg_len, + (unsigned long long)sg_dma_address(sg), sg_dma_len(sg)); + + if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) { + dev_err(dchan->device->dev, + "%s: SG entry %d is too large\n", __func__, i); + err = -EINVAL; + break; + } + + /* + * If this sg entry forms contiguous block with previous one, + * try to merge it into existing DMA descriptor + */ + if (next_addr == sg_dma_address(sg) && + bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) { + /* Adjust byte count of the descriptor */ + bcount += sg_dma_len(sg); + goto entry_done; + } else if (next_addr != -1) { + /* Finalize descriptor using total byte count value */ + tsi721_desc_fill_end(bd_ptr, bcount, 0); + dev_dbg(dchan->device->dev, + "%s: prev desc final len: %d\n", + __func__, bcount); + } + + desc->rio_addr = rio_addr; + + if (i && idx == rd_idx) { + dev_dbg(dchan->device->dev, + "%s: HW descriptor ring is full @ %d\n", + __func__, i); + desc->sg = sg; + desc->sg_len -= i; + break; + } + + bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx]; + err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size); + if (err) { + dev_err(dchan->device->dev, + "Failed to build desc: err=%d\n", err); + break; + } + + dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n", + bd_ptr, desc->destid, desc->rio_addr); + + next_addr = sg_dma_address(sg); + bcount = sg_dma_len(sg); + + add_count++; + if (++idx == bdma_chan->bd_num) { + /* wrap around link descriptor */ + idx = 0; + add_count++; + } + +entry_done: + if (sg_is_last(sg)) { + tsi721_desc_fill_end(bd_ptr, bcount, 0); + dev_dbg(dchan->device->dev, "%s: last desc final len: %d\n", + __func__, bcount); + desc->sg_len = 0; + } else { + rio_addr += sg_dma_len(sg); + next_addr += sg_dma_len(sg); + } + } + + if (!err) + bdma_chan->wr_count_next += add_count; + + return err; +} + +static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan) +{ + struct tsi721_tx_desc *desc; + int err; + + dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__); + + /* + * If there are any new transactions in the queue add them + * into the processing list + */ + if (!list_empty(&bdma_chan->queue)) + list_splice_init(&bdma_chan->queue, &bdma_chan->active_list); + + /* Start new transaction (if available) */ + if (!list_empty(&bdma_chan->active_list)) { + desc = tsi721_dma_first_active(bdma_chan); + err = tsi721_submit_sg(desc); + if (!err) + tsi721_start_dma(bdma_chan); + else { + tsi721_dma_tx_err(bdma_chan, desc); + dev_dbg(bdma_chan->dchan.device->dev, + "ERR: tsi721_submit_sg failed with err=%d\n", + err); + } + } + + dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__); +} + +static void tsi721_dma_tasklet(unsigned long data) +{ + struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data; + u32 dmac_int, dmac_sts; + + dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); + 
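+ /*
+ * dmac_int holds the latched channel events; writing the same value
+ * back to the INT register below acknowledges (clears) them.
+ */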
dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n", + __func__, bdma_chan->id, dmac_int); + /* Clear channel interrupts */ + iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT); + + if (dmac_int & TSI721_DMAC_INT_ERR) { + dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS); + dev_err(bdma_chan->dchan.device->dev, + "%s: DMA ERROR - DMAC%d_STS = 0x%x\n", + __func__, bdma_chan->id, dmac_sts); + } + + if (dmac_int & TSI721_DMAC_INT_STFULL) { + dev_err(bdma_chan->dchan.device->dev, + "%s: DMAC%d descriptor status FIFO is full\n", + __func__, bdma_chan->id); + } + + if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) { + struct tsi721_tx_desc *desc; + + tsi721_clr_stat(bdma_chan); + spin_lock(&bdma_chan->lock); + desc = tsi721_dma_first_active(bdma_chan); + + if (desc->sg_len == 0) { + dma_async_tx_callback callback = NULL; + void *param = NULL; + + desc->status = DMA_COMPLETE; + dma_cookie_complete(&desc->txd); + if (desc->txd.flags & DMA_PREP_INTERRUPT) { + callback = desc->txd.callback; + param = desc->txd.callback_param; + } + list_move(&desc->desc_node, &bdma_chan->free_list); + spin_unlock(&bdma_chan->lock); + if (callback) + callback(param); + spin_lock(&bdma_chan->lock); + } + + tsi721_advance_work(bdma_chan); + spin_unlock(&bdma_chan->lock); + } + + /* Re-Enable BDMA channel interrupts */ + iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE); +} + +static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd) +{ + struct tsi721_tx_desc *desc = to_tsi721_desc(txd); + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan); + dma_cookie_t cookie; + + /* Check if the descriptor is detached from any lists */ + if (!list_empty(&desc->desc_node)) { + dev_err(bdma_chan->dchan.device->dev, + "%s: wrong state of descriptor %p\n", __func__, txd); + return -EIO; + } + + spin_lock_bh(&bdma_chan->lock); + + if (!bdma_chan->active) { + spin_unlock_bh(&bdma_chan->lock); + return -ENODEV; + } + + cookie = dma_cookie_assign(txd); + desc->status = DMA_IN_PROGRESS; + list_add_tail(&desc->desc_node, &bdma_chan->queue); + + spin_unlock_bh(&bdma_chan->lock); + return cookie; +} + +static int tsi721_alloc_chan_resources(struct dma_chan *dchan) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + struct tsi721_tx_desc *desc = NULL; + int i; + + dev_dbg(dchan->device->dev, "%s: for channel %d\n", + __func__, bdma_chan->id); + + if (bdma_chan->bd_base) + return TSI721_DMA_TX_QUEUE_SZ; + + /* Initialize BDMA channel */ + if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) { + dev_err(dchan->device->dev, "Unable to initialize data DMA" + " channel %d, aborting\n", bdma_chan->id); + return -ENODEV; + } + + /* Allocate queue of transaction descriptors */ + desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc), + GFP_KERNEL); + if (!desc) { + dev_err(dchan->device->dev, + "Failed to allocate logical descriptors\n"); + tsi721_bdma_ch_free(bdma_chan); + return -ENOMEM; + } + + bdma_chan->tx_desc = desc; + + for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) { + dma_async_tx_descriptor_init(&desc[i].txd, dchan); + desc[i].txd.tx_submit = tsi721_tx_submit; + desc[i].txd.flags = DMA_CTRL_ACK; + list_add(&desc[i].desc_node, &bdma_chan->free_list); + } + + dma_cookie_init(dchan); + + bdma_chan->active = true; + tsi721_bdma_interrupt_enable(bdma_chan, 1); + + return TSI721_DMA_TX_QUEUE_SZ; +} + +static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan) +{ + struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device); 
+ +#ifdef CONFIG_PCI_MSI + if (priv->flags & TSI721_USING_MSIX) { + synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE + + bdma_chan->id].vector); + synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT + + bdma_chan->id].vector); + } else +#endif + synchronize_irq(priv->pdev->irq); +} + +static void tsi721_free_chan_resources(struct dma_chan *dchan) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + + dev_dbg(dchan->device->dev, "%s: for channel %d\n", + __func__, bdma_chan->id); + + if (bdma_chan->bd_base == NULL) + return; + + BUG_ON(!list_empty(&bdma_chan->active_list)); + BUG_ON(!list_empty(&bdma_chan->queue)); + + tsi721_bdma_interrupt_enable(bdma_chan, 0); + bdma_chan->active = false; + tsi721_sync_dma_irq(bdma_chan); + tasklet_kill(&bdma_chan->tasklet); + INIT_LIST_HEAD(&bdma_chan->free_list); + kfree(bdma_chan->tx_desc); + tsi721_bdma_ch_free(bdma_chan); +} + +static +enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie, + struct dma_tx_state *txstate) +{ + return dma_cookie_status(dchan, cookie, txstate); +} + +static void tsi721_issue_pending(struct dma_chan *dchan) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + + dev_dbg(dchan->device->dev, "%s: Enter\n", __func__); + + if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) { + spin_lock_bh(&bdma_chan->lock); + tsi721_advance_work(bdma_chan); + spin_unlock_bh(&bdma_chan->lock); + } +} + +static +struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan, + struct scatterlist *sgl, unsigned int sg_len, + enum dma_transfer_direction dir, unsigned long flags, + void *tinfo) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + struct tsi721_tx_desc *desc, *_d; + struct rio_dma_ext *rext = tinfo; + enum dma_rtype rtype; + struct dma_async_tx_descriptor *txd = NULL; + + if (!sgl || !sg_len) { + dev_err(dchan->device->dev, "%s: No SG list\n", __func__); + return NULL; + } + + dev_dbg(dchan->device->dev, "%s: %s\n", __func__, + (dir == DMA_DEV_TO_MEM)?"READ":"WRITE"); + + if (dir == DMA_DEV_TO_MEM) + rtype = NREAD; + else if (dir == DMA_MEM_TO_DEV) { + switch (rext->wr_type) { + case RDW_ALL_NWRITE: + rtype = ALL_NWRITE; + break; + case RDW_ALL_NWRITE_R: + rtype = ALL_NWRITE_R; + break; + case RDW_LAST_NWRITE_R: + default: + rtype = LAST_NWRITE_R; + break; + } + } else { + dev_err(dchan->device->dev, + "%s: Unsupported DMA direction option\n", __func__); + return NULL; + } + + spin_lock_bh(&bdma_chan->lock); + + list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) { + if (async_tx_test_ack(&desc->txd)) { + list_del_init(&desc->desc_node); + desc->destid = rext->destid; + desc->rio_addr = rext->rio_addr; + desc->rio_addr_u = 0; + desc->rtype = rtype; + desc->sg_len = sg_len; + desc->sg = sgl; + txd = &desc->txd; + txd->flags = flags; + break; + } + } + + spin_unlock_bh(&bdma_chan->lock); + + return txd; +} + +static int tsi721_terminate_all(struct dma_chan *dchan) +{ + struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan); + struct tsi721_tx_desc *desc, *_d; + u32 dmac_int; + LIST_HEAD(list); + + dev_dbg(dchan->device->dev, "%s: Entry\n", __func__); + + spin_lock_bh(&bdma_chan->lock); + + bdma_chan->active = false; + + if (!tsi721_dma_is_idle(bdma_chan)) { + /* make sure to stop the transfer */ + iowrite32(TSI721_DMAC_CTL_SUSP, + bdma_chan->regs + TSI721_DMAC_CTL); + + /* Wait until DMA channel stops */ + do { + dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT); + } while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0); + } + + 
list_splice_init(&bdma_chan->active_list, &list); + list_splice_init(&bdma_chan->queue, &list); + + list_for_each_entry_safe(desc, _d, &list, desc_node) + tsi721_dma_tx_err(bdma_chan, desc); + + spin_unlock_bh(&bdma_chan->lock); + + return 0; +} + +int tsi721_register_dma(struct tsi721_device *priv) +{ + int i; + int nr_channels = 0; + int err; + struct rio_mport *mport = priv->mport; + + INIT_LIST_HEAD(&mport->dma.channels); + + for (i = 0; i < TSI721_DMA_MAXCH; i++) { + struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i]; + + if (i == TSI721_DMACH_MAINT) + continue; + + bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i); + + bdma_chan->dchan.device = &mport->dma; + bdma_chan->dchan.cookie = 1; + bdma_chan->dchan.chan_id = i; + bdma_chan->id = i; + bdma_chan->active = false; + + spin_lock_init(&bdma_chan->lock); + + INIT_LIST_HEAD(&bdma_chan->active_list); + INIT_LIST_HEAD(&bdma_chan->queue); + INIT_LIST_HEAD(&bdma_chan->free_list); + + tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet, + (unsigned long)bdma_chan); + list_add_tail(&bdma_chan->dchan.device_node, + &mport->dma.channels); + nr_channels++; + } + + mport->dma.chancnt = nr_channels; + dma_cap_zero(mport->dma.cap_mask); + dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask); + dma_cap_set(DMA_SLAVE, mport->dma.cap_mask); + + mport->dma.dev = &priv->pdev->dev; + mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources; + mport->dma.device_free_chan_resources = tsi721_free_chan_resources; + mport->dma.device_tx_status = tsi721_tx_status; + mport->dma.device_issue_pending = tsi721_issue_pending; + mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg; + mport->dma.device_terminate_all = tsi721_terminate_all; + + err = dma_async_device_register(&mport->dma); + if (err) + dev_err(&priv->pdev->dev, "Failed to register DMA device\n"); + + return err; +} diff --git a/drivers/rapidio/rio-access.c b/drivers/rapidio/rio-access.c new file mode 100644 index 000000000..a3824baca --- /dev/null +++ b/drivers/rapidio/rio-access.c @@ -0,0 +1,175 @@ +/* + * RapidIO configuration space access support + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include + +/* + * These interrupt-safe spinlocks protect all accesses to RIO + * configuration space and doorbell access. + */ +static DEFINE_SPINLOCK(rio_config_lock); +static DEFINE_SPINLOCK(rio_doorbell_lock); + +/* + * Wrappers for all RIO configuration access functions. They just check + * alignment, do locking and call the low-level functions pointed to + * by rio_mport->ops. + */ + +#define RIO_8_BAD 0 +#define RIO_16_BAD (offset & 1) +#define RIO_32_BAD (offset & 3) + +/** + * RIO_LOP_READ - Generate rio_local_read_config_* functions + * @size: Size of configuration space read (8, 16, 32 bits) + * @type: C type of value argument + * @len: Length of configuration space read (1, 2, 4 bytes) + * + * Generates rio_local_read_config_* functions used to access + * configuration space registers on the local device. 
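+ *
+ * Illustrative usage sketch (assumes a valid @mport pointer):
+ *
+ *	u32 pef;
+ *	if (__rio_local_read_config_32(mport, RIO_PEF_CAR, &pef))
+ *		pr_err("RIO: local PEF_CAR read failed\n");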
+ */
+#define RIO_LOP_READ(size,type,len) \
+int __rio_local_read_config_##size \
+	(struct rio_mport *mport, u32 offset, type *value) \
+{ \
+	int res; \
+	unsigned long flags; \
+	u32 data = 0; \
+	if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
+	spin_lock_irqsave(&rio_config_lock, flags); \
+	res = mport->ops->lcread(mport, mport->id, offset, len, &data); \
+	*value = (type)data; \
+	spin_unlock_irqrestore(&rio_config_lock, flags); \
+	return res; \
+}
+
+/**
+ * RIO_LOP_WRITE - Generate rio_local_write_config_* functions
+ * @size: Size of configuration space write (8, 16, 32 bits)
+ * @type: C type of value argument
+ * @len: Length of configuration space write (1, 2, 4 bytes)
+ *
+ * Generates rio_local_write_config_* functions used to access
+ * configuration space registers on the local device.
+ */
+#define RIO_LOP_WRITE(size,type,len) \
+int __rio_local_write_config_##size \
+	(struct rio_mport *mport, u32 offset, type value) \
+{ \
+	int res; \
+	unsigned long flags; \
+	if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
+	spin_lock_irqsave(&rio_config_lock, flags); \
+	res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\
+	spin_unlock_irqrestore(&rio_config_lock, flags); \
+	return res; \
+}
+
+RIO_LOP_READ(8, u8, 1)
+RIO_LOP_READ(16, u16, 2)
+RIO_LOP_READ(32, u32, 4)
+RIO_LOP_WRITE(8, u8, 1)
+RIO_LOP_WRITE(16, u16, 2)
+RIO_LOP_WRITE(32, u32, 4)
+
+EXPORT_SYMBOL_GPL(__rio_local_read_config_8);
+EXPORT_SYMBOL_GPL(__rio_local_read_config_16);
+EXPORT_SYMBOL_GPL(__rio_local_read_config_32);
+EXPORT_SYMBOL_GPL(__rio_local_write_config_8);
+EXPORT_SYMBOL_GPL(__rio_local_write_config_16);
+EXPORT_SYMBOL_GPL(__rio_local_write_config_32);
+
+/**
+ * RIO_OP_READ - Generate rio_mport_read_config_* functions
+ * @size: Size of configuration space read (8, 16, 32 bits)
+ * @type: C type of value argument
+ * @len: Length of configuration space read (1, 2, 4 bytes)
+ *
+ * Generates rio_mport_read_config_* functions used to access
+ * configuration space registers on a remote device.
+ */
+#define RIO_OP_READ(size,type,len) \
+int rio_mport_read_config_##size \
+	(struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \
+{ \
+	int res; \
+	unsigned long flags; \
+	u32 data = 0; \
+	if (RIO_##size##_BAD) return RIO_BAD_SIZE; \
+	spin_lock_irqsave(&rio_config_lock, flags); \
+	res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \
+	*value = (type)data; \
+	spin_unlock_irqrestore(&rio_config_lock, flags); \
+	return res; \
+}
+
+/**
+ * RIO_OP_WRITE - Generate rio_mport_write_config_* functions
+ * @size: Size of configuration space write (8, 16, 32 bits)
+ * @type: C type of value argument
+ * @len: Length of configuration space write (1, 2, 4 bytes)
+ *
+ * Generates rio_mport_write_config_* functions used to access
+ * configuration space registers on a remote device.
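+ *
+ * Illustrative usage sketch (assumes @destid/@hopcount address a reachable
+ * device):
+ *
+ *	rio_mport_write_config_32(mport, destid, hopcount,
+ *				  RIO_COMPONENT_TAG_CSR, tag);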
+ */ +#define RIO_OP_WRITE(size,type,len) \ +int rio_mport_write_config_##size \ + (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \ +{ \ + int res; \ + unsigned long flags; \ + if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ + spin_lock_irqsave(&rio_config_lock, flags); \ + res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \ + spin_unlock_irqrestore(&rio_config_lock, flags); \ + return res; \ +} + +RIO_OP_READ(8, u8, 1) +RIO_OP_READ(16, u16, 2) +RIO_OP_READ(32, u32, 4) +RIO_OP_WRITE(8, u8, 1) +RIO_OP_WRITE(16, u16, 2) +RIO_OP_WRITE(32, u32, 4) + +EXPORT_SYMBOL_GPL(rio_mport_read_config_8); +EXPORT_SYMBOL_GPL(rio_mport_read_config_16); +EXPORT_SYMBOL_GPL(rio_mport_read_config_32); +EXPORT_SYMBOL_GPL(rio_mport_write_config_8); +EXPORT_SYMBOL_GPL(rio_mport_write_config_16); +EXPORT_SYMBOL_GPL(rio_mport_write_config_32); + +/** + * rio_mport_send_doorbell - Send a doorbell message + * + * @mport: RIO master port + * @destid: RIO device destination ID + * @data: Doorbell message data + * + * Send a doorbell message to a RIO device. The doorbell message + * has a 16-bit info field provided by the data argument. + */ +int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data) +{ + int res; + unsigned long flags; + + spin_lock_irqsave(&rio_doorbell_lock, flags); + res = mport->ops->dsend(mport, mport->id, destid, data); + spin_unlock_irqrestore(&rio_doorbell_lock, flags); + + return res; +} + +EXPORT_SYMBOL_GPL(rio_mport_send_doorbell); diff --git a/drivers/rapidio/rio-driver.c b/drivers/rapidio/rio-driver.c new file mode 100644 index 000000000..f301f059b --- /dev/null +++ b/drivers/rapidio/rio-driver.c @@ -0,0 +1,260 @@ +/* + * RapidIO driver support + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + */ + +#include +#include +#include +#include + +#include "rio.h" + +/** + * rio_match_device - Tell if a RIO device has a matching RIO device id structure + * @id: the RIO device id structure to match against + * @rdev: the RIO device structure to match against + * + * Used from driver probe and bus matching to check whether a RIO device + * matches a device id structure provided by a RIO driver. Returns the + * matching &struct rio_device_id or %NULL if there is no match. + */ +static const struct rio_device_id *rio_match_device(const struct rio_device_id + *id, + const struct rio_dev *rdev) +{ + while (id->vid || id->asm_vid) { + if (((id->vid == RIO_ANY_ID) || (id->vid == rdev->vid)) && + ((id->did == RIO_ANY_ID) || (id->did == rdev->did)) && + ((id->asm_vid == RIO_ANY_ID) + || (id->asm_vid == rdev->asm_vid)) + && ((id->asm_did == RIO_ANY_ID) + || (id->asm_did == rdev->asm_did))) + return id; + id++; + } + return NULL; +} + +/** + * rio_dev_get - Increments the reference count of the RIO device structure + * + * @rdev: RIO device being referenced + * + * Each live reference to a device should be refcounted. + * + * Drivers for RIO devices should normally record such references in + * their probe() methods, when they bind to a device, and release + * them by calling rio_dev_put(), in their disconnect() methods. 
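+ *
+ * Typical pairing (sketch):
+ *
+ *	rio_dev_get(rdev);	in probe(), when binding to the device
+ *	rio_dev_put(rdev);	in disconnect(), when done with it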
+ */
+struct rio_dev *rio_dev_get(struct rio_dev *rdev)
+{
+	if (rdev)
+		get_device(&rdev->dev);
+
+	return rdev;
+}
+
+/**
+ * rio_dev_put - Release a use of the RIO device structure
+ *
+ * @rdev: RIO device being disconnected
+ *
+ * Must be called when a user of a device is finished with it.
+ * When the last user of the device calls this function, the
+ * memory of the device is freed.
+ */
+void rio_dev_put(struct rio_dev *rdev)
+{
+	if (rdev)
+		put_device(&rdev->dev);
+}
+
+/**
+ * rio_device_probe - Probe a RIO device and bind it to a matching driver
+ * @dev: the RIO device structure to probe
+ *
+ * Returns 0 and sets rio_dev->driver when the driver claims rio_dev,
+ * otherwise a negative error code.
+ */
+static int rio_device_probe(struct device *dev)
+{
+	struct rio_driver *rdrv = to_rio_driver(dev->driver);
+	struct rio_dev *rdev = to_rio_dev(dev);
+	int error = -ENODEV;
+	const struct rio_device_id *id;
+
+	if (!rdev->driver && rdrv->probe) {
+		if (!rdrv->id_table)
+			return error;
+		id = rio_match_device(rdrv->id_table, rdev);
+		rio_dev_get(rdev);
+		if (id)
+			error = rdrv->probe(rdev, id);
+		if (error >= 0) {
+			rdev->driver = rdrv;
+			error = 0;
+		} else
+			rio_dev_put(rdev);
+	}
+	return error;
+}
+
+/**
+ * rio_device_remove - Remove a RIO device from the system
+ *
+ * @dev: the RIO device structure to remove
+ *
+ * Remove a RIO device from the system. If it has an associated
+ * driver, then run the driver remove() method. Then update
+ * the reference count.
+ */
+static int rio_device_remove(struct device *dev)
+{
+	struct rio_dev *rdev = to_rio_dev(dev);
+	struct rio_driver *rdrv = rdev->driver;
+
+	if (rdrv) {
+		if (rdrv->remove)
+			rdrv->remove(rdev);
+		rdev->driver = NULL;
+	}
+
+	rio_dev_put(rdev);
+
+	return 0;
+}
+
+/**
+ * rio_register_driver - register a new RIO driver
+ * @rdrv: the RIO driver structure to register
+ *
+ * Adds a &struct rio_driver to the list of registered drivers.
+ * Returns a negative value on error, otherwise 0. If no error
+ * occurred, the driver remains registered even if no device
+ * was claimed during registration.
+ */
+int rio_register_driver(struct rio_driver *rdrv)
+{
+	/* initialize common driver fields */
+	rdrv->driver.name = rdrv->name;
+	rdrv->driver.bus = &rio_bus_type;
+
+	/* register with core */
+	return driver_register(&rdrv->driver);
+}
+
+/**
+ * rio_unregister_driver - unregister a RIO driver
+ * @rdrv: the RIO driver structure to unregister
+ *
+ * Deletes the &struct rio_driver from the list of registered RIO
+ * drivers, gives it a chance to clean up by calling its remove()
+ * function for each device it was responsible for, and marks those
+ * devices as driverless.
+ */
+void rio_unregister_driver(struct rio_driver *rdrv)
+{
+	driver_unregister(&rdrv->driver);
+}
+
+void rio_attach_device(struct rio_dev *rdev)
+{
+	rdev->dev.bus = &rio_bus_type;
+}
+EXPORT_SYMBOL_GPL(rio_attach_device);
+
+/**
+ * rio_match_bus - Tell if a RIO device structure has a matching RIO driver device id structure
+ * @dev: the standard device structure to match against
+ * @drv: the standard driver structure containing the ids to match against
+ *
+ * Used by a driver to check whether a RIO device present in the
+ * system is in its list of supported devices. Returns 1 if
+ * there is a matching &struct rio_device_id or 0 if there is
+ * no match.
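+ *
+ * A driver supplies its ids via an id_table terminated by an all-zero
+ * entry, e.g. (hypothetical match-anything table):
+ *
+ *	static const struct rio_device_id my_ids[] = {
+ *		{RIO_DEVICE(RIO_ANY_ID, RIO_ANY_ID)},
+ *		{ 0, }
+ *	};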
+ */
+static int rio_match_bus(struct device *dev, struct device_driver *drv)
+{
+	struct rio_dev *rdev = to_rio_dev(dev);
+	struct rio_driver *rdrv = to_rio_driver(drv);
+	const struct rio_device_id *id = rdrv->id_table;
+	const struct rio_device_id *found_id;
+
+	if (!id)
+		goto out;
+
+	found_id = rio_match_device(id, rdev);
+
+	if (found_id)
+		return 1;
+
+out:
+	return 0;
+}
+
+static int rio_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+	struct rio_dev *rdev;
+
+	if (!dev)
+		return -ENODEV;
+
+	rdev = to_rio_dev(dev);
+	if (!rdev)
+		return -ENODEV;
+
+	if (add_uevent_var(env, "MODALIAS=rapidio:v%04Xd%04Xav%04Xad%04X",
+			   rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did))
+		return -ENOMEM;
+	return 0;
+}
+
+struct class rio_mport_class = {
+	.name		= "rapidio_port",
+	.owner		= THIS_MODULE,
+	.dev_groups	= rio_mport_groups,
+};
+EXPORT_SYMBOL_GPL(rio_mport_class);
+
+struct bus_type rio_bus_type = {
+	.name = "rapidio",
+	.match = rio_match_bus,
+	.dev_groups = rio_dev_groups,
+	.bus_groups = rio_bus_groups,
+	.probe = rio_device_probe,
+	.remove = rio_device_remove,
+	.uevent	= rio_uevent,
+};
+
+/**
+ * rio_bus_init - Register the RapidIO bus with the device model
+ *
+ * Registers the RIO mport device class and RIO bus type with the Linux
+ * device model.
+ */
+static int __init rio_bus_init(void)
+{
+	int ret;
+
+	ret = class_register(&rio_mport_class);
+	if (!ret) {
+		ret = bus_register(&rio_bus_type);
+		if (ret)
+			class_unregister(&rio_mport_class);
+	}
+	return ret;
+}
+
+postcore_initcall(rio_bus_init);
+
+EXPORT_SYMBOL_GPL(rio_register_driver);
+EXPORT_SYMBOL_GPL(rio_unregister_driver);
+EXPORT_SYMBOL_GPL(rio_bus_type);
+EXPORT_SYMBOL_GPL(rio_dev_get);
+EXPORT_SYMBOL_GPL(rio_dev_put);
diff --git a/drivers/rapidio/rio-scan.c b/drivers/rapidio/rio-scan.c
new file mode 100644
index 000000000..47a1b2ea7
--- /dev/null
+++ b/drivers/rapidio/rio-scan.c
@@ -0,0 +1,1201 @@
+/*
+ * RapidIO enumeration and discovery support
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter
+ *
+ * Copyright 2009 Integrated Device Technology, Inc.
+ * Alex Bounine
+ * - Added Port-Write/Error Management initialization and handling
+ *
+ * Copyright 2009 Sysgo AG
+ * Thomas Moll
+ * - Added Input- Output- enable functionality, to allow full communication
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include
+#include
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "rio.h"
+
+static void rio_init_em(struct rio_dev *rdev);
+
+static int next_destid = 0;
+static int next_comptag = 1;
+
+static int rio_mport_phys_table[] = {
+	RIO_EFB_PAR_EP_ID,
+	RIO_EFB_PAR_EP_REC_ID,
+	RIO_EFB_SER_EP_ID,
+	RIO_EFB_SER_EP_REC_ID,
+	-1,
+};
+
+
+/**
+ * rio_destid_alloc - Allocate next available destID for given network
+ * @net: RIO network
+ *
+ * Returns next available device destination ID for the specified RIO network.
+ * Marks the allocated ID as in use.
+ * Returns RIO_INVALID_DESTID if new destID is not available.
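+ *
+ * Callers are expected to check for exhaustion (sketch):
+ *
+ *	u16 destid = rio_destid_alloc(net);
+ *	if (destid == RIO_INVALID_DESTID)
+ *		...handle destination ID exhaustion...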
+ */
+static u16 rio_destid_alloc(struct rio_net *net)
+{
+	int destid;
+	struct rio_id_table *idtab = &net->destid_table;
+
+	spin_lock(&idtab->lock);
+	destid = find_first_zero_bit(idtab->table, idtab->max);
+
+	if (destid < idtab->max) {
+		set_bit(destid, idtab->table);
+		destid += idtab->start;
+	} else
+		destid = RIO_INVALID_DESTID;
+
+	spin_unlock(&idtab->lock);
+	return (u16)destid;
+}
+
+/**
+ * rio_destid_reserve - Reserve the specified destID
+ * @net: RIO network
+ * @destid: destID to reserve
+ *
+ * Tries to reserve the specified destID.
+ * Returns 0 if successful.
+ */
+static int rio_destid_reserve(struct rio_net *net, u16 destid)
+{
+	int oldbit;
+	struct rio_id_table *idtab = &net->destid_table;
+
+	destid -= idtab->start;
+	spin_lock(&idtab->lock);
+	oldbit = test_and_set_bit(destid, idtab->table);
+	spin_unlock(&idtab->lock);
+	return oldbit;
+}
+
+/**
+ * rio_destid_free - free a previously allocated destID
+ * @net: RIO network
+ * @destid: destID to free
+ *
+ * Makes the specified destID available for use.
+ */
+static void rio_destid_free(struct rio_net *net, u16 destid)
+{
+	struct rio_id_table *idtab = &net->destid_table;
+
+	destid -= idtab->start;
+	spin_lock(&idtab->lock);
+	clear_bit(destid, idtab->table);
+	spin_unlock(&idtab->lock);
+}
+
+/**
+ * rio_destid_first - return first destID in use
+ * @net: RIO network
+ */
+static u16 rio_destid_first(struct rio_net *net)
+{
+	int destid;
+	struct rio_id_table *idtab = &net->destid_table;
+
+	spin_lock(&idtab->lock);
+	destid = find_first_bit(idtab->table, idtab->max);
+	if (destid >= idtab->max)
+		destid = RIO_INVALID_DESTID;
+	else
+		destid += idtab->start;
+	spin_unlock(&idtab->lock);
+	return (u16)destid;
+}
+
+/**
+ * rio_destid_next - return next destID in use
+ * @net: RIO network
+ * @from: destination ID from which search shall continue
+ */
+static u16 rio_destid_next(struct rio_net *net, u16 from)
+{
+	int destid;
+	struct rio_id_table *idtab = &net->destid_table;
+
+	spin_lock(&idtab->lock);
+	destid = find_next_bit(idtab->table, idtab->max, from);
+	if (destid >= idtab->max)
+		destid = RIO_INVALID_DESTID;
+	else
+		destid += idtab->start;
+	spin_unlock(&idtab->lock);
+	return (u16)destid;
+}
+
+/**
+ * rio_get_device_id - Get the base/extended device id for a device
+ * @port: RIO master port
+ * @destid: Destination ID of device
+ * @hopcount: Hopcount to device
+ *
+ * Reads the base/extended device id from a device. Returns the
+ * 8/16-bit device ID.
+ */
+static u16 rio_get_device_id(struct rio_mport *port, u16 destid, u8 hopcount)
+{
+	u32 result;
+
+	rio_mport_read_config_32(port, destid, hopcount, RIO_DID_CSR, &result);
+
+	return RIO_GET_DID(port->sys_size, result);
+}
+
+/**
+ * rio_set_device_id - Set the base/extended device id for a device
+ * @port: RIO master port
+ * @destid: Destination ID of device
+ * @hopcount: Hopcount to device
+ * @did: Device ID value to be written
+ *
+ * Writes the base/extended device id to a device.
+ */
+static void rio_set_device_id(struct rio_mport *port, u16 destid, u8 hopcount, u16 did)
+{
+	rio_mport_write_config_32(port, destid, hopcount, RIO_DID_CSR,
+				  RIO_SET_DID(port->sys_size, did));
+}
+
+/**
+ * rio_local_set_device_id - Set the base/extended device id for a port
+ * @port: RIO master port
+ * @did: Device ID value to be written
+ *
+ * Writes the base/extended device id to the local device
+ * implementing the master port.
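+ *
+ * Sketch of typical use (mirrors the enumeration path in this file):
+ *
+ *	rio_local_set_device_id(port, port->host_deviceid);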
+ */
+static void rio_local_set_device_id(struct rio_mport *port, u16 did)
+{
+	rio_local_write_config_32(port, RIO_DID_CSR, RIO_SET_DID(port->sys_size,
+			did));
+}
+
+/**
+ * rio_clear_locks- Release all host locks and signal enumeration complete
+ * @net: RIO network to run on
+ *
+ * Marks the component tag CSR on each device with the enumeration
+ * complete flag. When complete, it then releases the host locks on
+ * each device. Returns 0 on success or %-EINVAL on failure.
+ */
+static int rio_clear_locks(struct rio_net *net)
+{
+	struct rio_mport *port = net->hport;
+	struct rio_dev *rdev;
+	u32 result;
+	int ret = 0;
+
+	/* Release host device id locks */
+	rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
+				  port->host_deviceid);
+	rio_local_read_config_32(port, RIO_HOST_DID_LOCK_CSR, &result);
+	if ((result & 0xffff) != 0xffff) {
+		printk(KERN_INFO
+		       "RIO: badness when releasing host lock on master port, result %8.8x\n",
+		       result);
+		ret = -EINVAL;
+	}
+	list_for_each_entry(rdev, &net->devices, net_list) {
+		rio_write_config_32(rdev, RIO_HOST_DID_LOCK_CSR,
+				    port->host_deviceid);
+		rio_read_config_32(rdev, RIO_HOST_DID_LOCK_CSR, &result);
+		if ((result & 0xffff) != 0xffff) {
+			printk(KERN_INFO
+			       "RIO: badness when releasing host lock on vid %4.4x did %4.4x\n",
+			       rdev->vid, rdev->did);
+			ret = -EINVAL;
+		}
+
+		/* Mark device as discovered and enable master */
+		rio_read_config_32(rdev,
+				   rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+				   &result);
+		result |= RIO_PORT_GEN_DISCOVERED | RIO_PORT_GEN_MASTER;
+		rio_write_config_32(rdev,
+				    rdev->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+				    result);
+	}
+
+	return ret;
+}
+
+/**
+ * rio_enum_host- Set host lock and initialize host destination ID
+ * @port: Master port to issue transaction
+ *
+ * Sets the local host master port lock and destination ID register
+ * with the host device ID value. The host device ID value is provided
+ * by the platform. Returns %0 on success or %-1 on failure.
+ */
+static int rio_enum_host(struct rio_mport *port)
+{
+	u32 result;
+
+	/* Set master port host device id lock */
+	rio_local_write_config_32(port, RIO_HOST_DID_LOCK_CSR,
+				  port->host_deviceid);
+
+	rio_local_read_config_32(port, RIO_HOST_DID_LOCK_CSR, &result);
+	if ((result & 0xffff) != port->host_deviceid)
+		return -1;
+
+	/* Set master port destid and init destid ctr */
+	rio_local_set_device_id(port, port->host_deviceid);
+	return 0;
+}
+
+/**
+ * rio_device_has_destid- Test if a device contains a destination ID register
+ * @port: Master port to issue transaction
+ * @src_ops: RIO device source operations
+ * @dst_ops: RIO device destination operations
+ *
+ * Checks the provided @src_ops and @dst_ops for the necessary transaction
+ * capabilities that indicate whether or not a device will implement a
+ * destination ID register. Returns 1 if true or 0 if false.
+ */
+static int rio_device_has_destid(struct rio_mport *port, int src_ops,
+				 int dst_ops)
+{
+	u32 mask = RIO_OPS_READ | RIO_OPS_WRITE | RIO_OPS_ATOMIC_TST_SWP | RIO_OPS_ATOMIC_INC | RIO_OPS_ATOMIC_DEC | RIO_OPS_ATOMIC_SET | RIO_OPS_ATOMIC_CLR;
+
+	return !!((src_ops | dst_ops) & mask);
+}
+
+/**
+ * rio_release_dev- Frees a RIO device struct
+ * @dev: LDM device associated with a RIO device struct
+ *
+ * Gets the RIO device struct associated with the LDM device
+ * and frees it.
+ */
+static void rio_release_dev(struct device *dev)
+{
+	struct rio_dev *rdev;
+
+	rdev = to_rio_dev(dev);
+	kfree(rdev);
+}
+
+/**
+ * rio_is_switch- Tests if a RIO device has switch capabilities
+ * @rdev: RIO device
+ *
+ * Gets the RIO device Processing Element Features register
+ * contents and tests for switch capabilities. Returns 1 if
+ * the device is a switch or 0 if it is not a switch.
+ */
+static int rio_is_switch(struct rio_dev *rdev)
+{
+	if (rdev->pef & RIO_PEF_SWITCH)
+		return 1;
+	return 0;
+}
+
+/**
+ * rio_setup_device- Allocates and sets up a RIO device
+ * @net: RIO network
+ * @port: Master port to send transactions
+ * @destid: Current destination ID
+ * @hopcount: Current hopcount
+ * @do_enum: Enumeration/Discovery mode flag
+ *
+ * Allocates a RIO device and configures fields based on configuration
+ * space contents. If device has a destination ID register, a destination
+ * ID is either assigned in enumeration mode or read from configuration
+ * space in discovery mode. If the device has switch capabilities, then
+ * a switch is allocated and configured appropriately. Returns a pointer
+ * to a RIO device on success or NULL on failure.
+ *
+ */
+static struct rio_dev *rio_setup_device(struct rio_net *net,
+					struct rio_mport *port, u16 destid,
+					u8 hopcount, int do_enum)
+{
+	int ret = 0;
+	struct rio_dev *rdev;
+	struct rio_switch *rswitch = NULL;
+	int result, rdid;
+	size_t size;
+	u32 swpinfo = 0;
+
+	size = sizeof(struct rio_dev);
+	if (rio_mport_read_config_32(port, destid, hopcount,
+				     RIO_PEF_CAR, &result))
+		return NULL;
+
+	if (result & (RIO_PEF_SWITCH | RIO_PEF_MULTIPORT)) {
+		rio_mport_read_config_32(port, destid, hopcount,
+					 RIO_SWP_INFO_CAR, &swpinfo);
+		if (result & RIO_PEF_SWITCH) {
+			size += (RIO_GET_TOTAL_PORTS(swpinfo) *
+				sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
+		}
+	}
+
+	rdev = kzalloc(size, GFP_KERNEL);
+	if (!rdev)
+		return NULL;
+
+	rdev->net = net;
+	rdev->pef = result;
+	rdev->swpinfo = swpinfo;
+	rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_ID_CAR,
+				 &result);
+	rdev->did = result >> 16;
+	rdev->vid = result & 0xffff;
+	rio_mport_read_config_32(port, destid, hopcount, RIO_DEV_INFO_CAR,
+				 &rdev->device_rev);
+	rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_ID_CAR,
+				 &result);
+	rdev->asm_did = result >> 16;
+	rdev->asm_vid = result & 0xffff;
+	rio_mport_read_config_32(port, destid, hopcount, RIO_ASM_INFO_CAR,
+				 &result);
+	rdev->asm_rev = result >> 16;
+	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
+		rdev->efptr = result & 0xffff;
+		rdev->phys_efptr = rio_mport_get_physefb(port, 0, destid,
+							 hopcount);
+
+		rdev->em_efptr = rio_mport_get_feature(port, 0, destid,
+						       hopcount, RIO_EFB_ERR_MGMNT);
+	}
+
+	rio_mport_read_config_32(port, destid, hopcount, RIO_SRC_OPS_CAR,
+				 &rdev->src_ops);
+	rio_mport_read_config_32(port, destid, hopcount, RIO_DST_OPS_CAR,
+				 &rdev->dst_ops);
+
+	if (do_enum) {
+		/* Assign component tag to device */
+		if (next_comptag >= 0x10000) {
+			pr_err("RIO: Component Tag Counter Overflow\n");
+			goto cleanup;
+		}
+		rio_mport_write_config_32(port, destid, hopcount,
+					  RIO_COMPONENT_TAG_CSR, next_comptag);
+		rdev->comp_tag = next_comptag++;
+		rdev->do_enum = true;
+	} else {
+		rio_mport_read_config_32(port, destid, hopcount,
+					 RIO_COMPONENT_TAG_CSR,
+					 &rdev->comp_tag);
+	}
+
+	if (rio_device_has_destid(port, rdev->src_ops, rdev->dst_ops)) {
+		if (do_enum) {
+			rio_set_device_id(port, destid, hopcount, next_destid);
+			rdev->destid = next_destid;
+			next_destid =
rio_destid_alloc(net);
+		} else
+			rdev->destid = rio_get_device_id(port, destid, hopcount);
+
+		rdev->hopcount = 0xff;
+	} else {
+		/* Switch device has an associated destID which
+		 * will be adjusted later
+		 */
+		rdev->destid = destid;
+		rdev->hopcount = hopcount;
+	}
+
+	/* If a PE has both switch and other functions, show it as a switch */
+	if (rio_is_switch(rdev)) {
+		rswitch = rdev->rswitch;
+		rswitch->port_ok = 0;
+		spin_lock_init(&rswitch->lock);
+		rswitch->route_table = kzalloc(sizeof(u8)*
+					RIO_MAX_ROUTE_ENTRIES(port->sys_size),
+					GFP_KERNEL);
+		if (!rswitch->route_table)
+			goto cleanup;
+		/* Initialize switch route table */
+		for (rdid = 0; rdid < RIO_MAX_ROUTE_ENTRIES(port->sys_size);
+				rdid++)
+			rswitch->route_table[rdid] = RIO_INVALID_ROUTE;
+		dev_set_name(&rdev->dev, "%02x:s:%04x", rdev->net->id,
+			     rdev->comp_tag & RIO_CTAG_UDEVID);
+
+		if (do_enum)
+			rio_route_clr_table(rdev, RIO_GLOBAL_TABLE, 0);
+
+		list_add_tail(&rswitch->node, &net->switches);
+
+	} else {
+		if (do_enum)
+			/* Enable Input Output Port (transmitter receiver) */
+			rio_enable_rx_tx_port(port, 0, destid, hopcount, 0);
+
+		dev_set_name(&rdev->dev, "%02x:e:%04x", rdev->net->id,
+			     rdev->comp_tag & RIO_CTAG_UDEVID);
+	}
+
+	rdev->dev.parent = &port->dev;
+	rio_attach_device(rdev);
+
+	device_initialize(&rdev->dev);
+	rdev->dev.release = rio_release_dev;
+	rio_dev_get(rdev);
+
+	rdev->dma_mask = DMA_BIT_MASK(32);
+	rdev->dev.dma_mask = &rdev->dma_mask;
+	rdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+
+	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
+		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
+				   0, 0xffff);
+
+	ret = rio_add_device(rdev);
+	if (ret)
+		goto cleanup;
+
+	return rdev;
+
+cleanup:
+	if (rswitch)
+		kfree(rswitch->route_table);
+
+	kfree(rdev);
+	return NULL;
+}
+
+/**
+ * rio_sport_is_active- Tests if a switch port has an active connection.
+ * @port: Master port to send transaction
+ * @destid: Associated destination ID for switch
+ * @hopcount: Hopcount to reach switch
+ * @sport: Switch port number
+ *
+ * Reads the port error status CSR for a particular switch port to
+ * determine if the port has an active link. Returns
+ * %RIO_PORT_N_ERR_STS_PORT_OK if the port is active or %0 if it is
+ * inactive.
+ */
+static int
+rio_sport_is_active(struct rio_mport *port, u16 destid, u8 hopcount, int sport)
+{
+	u32 result = 0;
+	u32 ext_ftr_ptr;
+
+	ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount, 0);
+
+	while (ext_ftr_ptr) {
+		rio_mport_read_config_32(port, destid, hopcount,
+					 ext_ftr_ptr, &result);
+		result = RIO_GET_BLOCK_ID(result);
+		if ((result == RIO_EFB_SER_EP_FREE_ID) ||
+		    (result == RIO_EFB_SER_EP_FREE_ID_V13P) ||
+		    (result == RIO_EFB_SER_EP_FREC_ID))
+			break;
+
+		ext_ftr_ptr = rio_mport_get_efb(port, 0, destid, hopcount,
+						ext_ftr_ptr);
+	}
+
+	if (ext_ftr_ptr)
+		rio_mport_read_config_32(port, destid, hopcount,
+					 ext_ftr_ptr +
+					 RIO_PORT_N_ERR_STS_CSR(sport),
+					 &result);
+
+	return result & RIO_PORT_N_ERR_STS_PORT_OK;
+}
+
+/**
+ * rio_get_host_deviceid_lock- Reads the Host Device ID Lock CSR on a device
+ * @port: Master port to send transaction
+ * @hopcount: Number of hops to the device
+ *
+ * Used during enumeration to read the Host Device ID Lock CSR on a
+ * RIO device. Returns the value of the lock register.
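+ *
+ * Enumeration uses the returned value to test lock ownership (sketch):
+ *
+ *	if (rio_get_host_deviceid_lock(port, hopcount) == port->host_deviceid)
+ *		...this host already holds the device lock...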
+ */ +static u16 rio_get_host_deviceid_lock(struct rio_mport *port, u8 hopcount) +{ + u32 result; + + rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size), hopcount, + RIO_HOST_DID_LOCK_CSR, &result); + + return (u16) (result & 0xffff); +} + +/** + * rio_enum_peer- Recursively enumerate a RIO network through a master port + * @net: RIO network being enumerated + * @port: Master port to send transactions + * @hopcount: Number of hops into the network + * @prev: Previous RIO device connected to the enumerated one + * @prev_port: Port on previous RIO device + * + * Recursively enumerates a RIO network. Transactions are sent via the + * master port passed in @port. + */ +static int rio_enum_peer(struct rio_net *net, struct rio_mport *port, + u8 hopcount, struct rio_dev *prev, int prev_port) +{ + struct rio_dev *rdev; + u32 regval; + int tmp; + + if (rio_mport_chk_dev_access(port, + RIO_ANY_DESTID(port->sys_size), hopcount)) { + pr_debug("RIO: device access check failed\n"); + return -1; + } + + if (rio_get_host_deviceid_lock(port, hopcount) == port->host_deviceid) { + pr_debug("RIO: PE already discovered by this host\n"); + /* + * Already discovered by this host. Add it as another + * link to the existing device. + */ + rio_mport_read_config_32(port, RIO_ANY_DESTID(port->sys_size), + hopcount, RIO_COMPONENT_TAG_CSR, ®val); + + if (regval) { + rdev = rio_get_comptag((regval & 0xffff), NULL); + + if (rdev && prev && rio_is_switch(prev)) { + pr_debug("RIO: redundant path to %s\n", + rio_name(rdev)); + prev->rswitch->nextdev[prev_port] = rdev; + } + } + + return 0; + } + + /* Attempt to acquire device lock */ + rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size), + hopcount, + RIO_HOST_DID_LOCK_CSR, port->host_deviceid); + while ((tmp = rio_get_host_deviceid_lock(port, hopcount)) + < port->host_deviceid) { + /* Delay a bit */ + mdelay(1); + /* Attempt to acquire device lock again */ + rio_mport_write_config_32(port, RIO_ANY_DESTID(port->sys_size), + hopcount, + RIO_HOST_DID_LOCK_CSR, + port->host_deviceid); + } + + if (rio_get_host_deviceid_lock(port, hopcount) > port->host_deviceid) { + pr_debug( + "RIO: PE locked by a higher priority host...retreating\n"); + return -1; + } + + /* Setup new RIO device */ + rdev = rio_setup_device(net, port, RIO_ANY_DESTID(port->sys_size), + hopcount, 1); + if (rdev) { + /* Add device to the global and bus/net specific list. 
*/
+		list_add_tail(&rdev->net_list, &net->devices);
+		rdev->prev = prev;
+		if (prev && rio_is_switch(prev))
+			prev->rswitch->nextdev[prev_port] = rdev;
+	} else
+		return -1;
+
+	if (rio_is_switch(rdev)) {
+		int sw_destid;
+		int cur_destid;
+		int sw_inport;
+		u16 destid;
+		int port_num;
+
+		sw_inport = RIO_GET_PORT_NUM(rdev->swpinfo);
+		rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
+				    port->host_deviceid, sw_inport, 0);
+		rdev->rswitch->route_table[port->host_deviceid] = sw_inport;
+
+		destid = rio_destid_first(net);
+		while (destid != RIO_INVALID_DESTID && destid < next_destid) {
+			if (destid != port->host_deviceid) {
+				rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
+						    destid, sw_inport, 0);
+				rdev->rswitch->route_table[destid] = sw_inport;
+			}
+			destid = rio_destid_next(net, destid + 1);
+		}
+		pr_debug(
+		    "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n",
+		    rio_name(rdev), rdev->vid, rdev->did,
+		    RIO_GET_TOTAL_PORTS(rdev->swpinfo));
+		sw_destid = next_destid;
+		for (port_num = 0;
+		     port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+		     port_num++) {
+			if (sw_inport == port_num) {
+				rio_enable_rx_tx_port(port, 0,
+					      RIO_ANY_DESTID(port->sys_size),
+					      hopcount, port_num);
+				rdev->rswitch->port_ok |= (1 << port_num);
+				continue;
+			}
+
+			cur_destid = next_destid;
+
+			if (rio_sport_is_active
+			    (port, RIO_ANY_DESTID(port->sys_size), hopcount,
+			     port_num)) {
+				pr_debug(
+				    "RIO: scanning device on port %d\n",
+				    port_num);
+				rio_enable_rx_tx_port(port, 0,
+					      RIO_ANY_DESTID(port->sys_size),
+					      hopcount, port_num);
+				rdev->rswitch->port_ok |= (1 << port_num);
+				rio_route_add_entry(rdev, RIO_GLOBAL_TABLE,
+						RIO_ANY_DESTID(port->sys_size),
+						port_num, 0);
+
+				if (rio_enum_peer(net, port, hopcount + 1,
+						  rdev, port_num) < 0)
+					return -1;
+
+				/* Update routing tables */
+				destid = rio_destid_next(net, cur_destid + 1);
+				if (destid != RIO_INVALID_DESTID) {
+					for (destid = cur_destid;
+					     destid < next_destid;) {
+						if (destid != port->host_deviceid) {
+							rio_route_add_entry(rdev,
+								    RIO_GLOBAL_TABLE,
+								    destid,
+								    port_num,
+								    0);
+							rdev->rswitch->
+								route_table[destid] =
+								port_num;
+						}
+						destid = rio_destid_next(net,
+								destid + 1);
+					}
+				}
+			} else {
+				/* If switch supports Error Management,
+				 * set PORT_LOCKOUT bit for unused port
+				 */
+				if (rdev->em_efptr)
+					rio_set_port_lockout(rdev, port_num, 1);
+
+				rdev->rswitch->port_ok &= ~(1 << port_num);
+			}
+		}
+
+		/* Direct Port-write messages to the enumerating host */
+		if ((rdev->src_ops & RIO_SRC_OPS_PORT_WRITE) &&
+		    (rdev->em_efptr)) {
+			rio_write_config_32(rdev,
+					rdev->em_efptr + RIO_EM_PW_TGT_DEVID,
+					(port->host_deviceid << 16) |
+					(port->sys_size << 15));
+		}
+
+		rio_init_em(rdev);
+
+		/* Check for empty switch */
+		if (next_destid == sw_destid)
+			next_destid = rio_destid_alloc(net);
+
+		rdev->destid = sw_destid;
+	} else
+		pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n",
+			 rio_name(rdev), rdev->vid, rdev->did);
+
+	return 0;
+}
+
+/**
+ * rio_enum_complete- Tests if enumeration of a network is complete
+ * @port: Master port to send transaction
+ *
+ * Tests the PGCCSR discovered bit for non-zero value (enumeration
+ * complete flag). Return %1 if enumeration is complete or %0 if
+ * enumeration is incomplete.
+ */
+static int rio_enum_complete(struct rio_mport *port)
+{
+	u32 regval;
+
+	rio_local_read_config_32(port, port->phys_efptr + RIO_PORT_GEN_CTL_CSR,
+				 &regval);
+	return (regval & RIO_PORT_GEN_DISCOVERED) ?
1 : 0; +} + +/** + * rio_disc_peer- Recursively discovers a RIO network through a master port + * @net: RIO network being discovered + * @port: Master port to send transactions + * @destid: Current destination ID in network + * @hopcount: Number of hops into the network + * @prev: previous rio_dev + * @prev_port: previous port number + * + * Recursively discovers a RIO network. Transactions are sent via the + * master port passed in @port. + */ +static int +rio_disc_peer(struct rio_net *net, struct rio_mport *port, u16 destid, + u8 hopcount, struct rio_dev *prev, int prev_port) +{ + u8 port_num, route_port; + struct rio_dev *rdev; + u16 ndestid; + + /* Setup new RIO device */ + if ((rdev = rio_setup_device(net, port, destid, hopcount, 0))) { + /* Add device to the global and bus/net specific list. */ + list_add_tail(&rdev->net_list, &net->devices); + rdev->prev = prev; + if (prev && rio_is_switch(prev)) + prev->rswitch->nextdev[prev_port] = rdev; + } else + return -1; + + if (rio_is_switch(rdev)) { + /* Associated destid is how we accessed this switch */ + rdev->destid = destid; + + pr_debug( + "RIO: found %s (vid %4.4x did %4.4x) with %d ports\n", + rio_name(rdev), rdev->vid, rdev->did, + RIO_GET_TOTAL_PORTS(rdev->swpinfo)); + for (port_num = 0; + port_num < RIO_GET_TOTAL_PORTS(rdev->swpinfo); + port_num++) { + if (RIO_GET_PORT_NUM(rdev->swpinfo) == port_num) + continue; + + if (rio_sport_is_active + (port, destid, hopcount, port_num)) { + pr_debug( + "RIO: scanning device on port %d\n", + port_num); + + rio_lock_device(port, destid, hopcount, 1000); + + for (ndestid = 0; + ndestid < RIO_ANY_DESTID(port->sys_size); + ndestid++) { + rio_route_get_entry(rdev, + RIO_GLOBAL_TABLE, + ndestid, + &route_port, 0); + if (route_port == port_num) + break; + } + + if (ndestid == RIO_ANY_DESTID(port->sys_size)) + continue; + rio_unlock_device(port, destid, hopcount); + if (rio_disc_peer(net, port, ndestid, + hopcount + 1, rdev, port_num) < 0) + return -1; + } + } + } else + pr_debug("RIO: found %s (vid %4.4x did %4.4x)\n", + rio_name(rdev), rdev->vid, rdev->did); + + return 0; +} + +/** + * rio_mport_is_active- Tests if master port link is active + * @port: Master port to test + * + * Reads the port error status CSR for the master port to + * determine if the port has an active link. Returns + * %RIO_PORT_N_ERR_STS_PORT_OK if the master port is active + * or %0 if it is inactive. + */ +static int rio_mport_is_active(struct rio_mport *port) +{ + u32 result = 0; + u32 ext_ftr_ptr; + int *entry = rio_mport_phys_table; + + do { + if ((ext_ftr_ptr = + rio_mport_get_feature(port, 1, 0, 0, *entry))) + break; + } while (*++entry >= 0); + + if (ext_ftr_ptr) + rio_local_read_config_32(port, + ext_ftr_ptr + + RIO_PORT_N_ERR_STS_CSR(port->index), + &result); + + return result & RIO_PORT_N_ERR_STS_PORT_OK; +} + +/** + * rio_alloc_net- Allocate and configure a new RIO network + * @port: Master port associated with the RIO network + * @do_enum: Enumeration/Discovery mode flag + * @start: logical minimal start id for new net + * + * Allocates a RIO network structure, initializes per-network + * list heads, and adds the associated master port to the + * network list of associated master ports. Returns a + * RIO network pointer on success or %NULL on failure. 
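+ *
+ * Callers must handle allocation failure (sketch, as in rio_enum_mport()
+ * below):
+ *
+ *	net = rio_alloc_net(mport, 1, 0);
+ *	if (!net)
+ *		return -ENOMEM;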
+ */
+static struct rio_net *rio_alloc_net(struct rio_mport *port,
+				     int do_enum, u16 start)
+{
+	struct rio_net *net;
+
+	net = kzalloc(sizeof(struct rio_net), GFP_KERNEL);
+	if (net && do_enum) {
+		net->destid_table.table = kcalloc(
+			BITS_TO_LONGS(RIO_MAX_ROUTE_ENTRIES(port->sys_size)),
+			sizeof(long),
+			GFP_KERNEL);
+
+		if (net->destid_table.table == NULL) {
+			pr_err("RIO: failed to allocate destID table\n");
+			kfree(net);
+			net = NULL;
+		} else {
+			net->destid_table.start = start;
+			net->destid_table.max =
+					RIO_MAX_ROUTE_ENTRIES(port->sys_size);
+			spin_lock_init(&net->destid_table.lock);
+		}
+	}
+
+	if (net) {
+		INIT_LIST_HEAD(&net->node);
+		INIT_LIST_HEAD(&net->devices);
+		INIT_LIST_HEAD(&net->switches);
+		INIT_LIST_HEAD(&net->mports);
+		list_add_tail(&port->nnode, &net->mports);
+		net->hport = port;
+		net->id = port->id;
+	}
+	return net;
+}
+
+/**
+ * rio_update_route_tables- Updates route tables in switches
+ * @net: RIO network to run update on
+ *
+ * For each enumerated device, ensure that each switch in a system
+ * has correct routing entries. Add routes for devices that were
+ * unknown during the first enumeration pass through the switch.
+ */
+static void rio_update_route_tables(struct rio_net *net)
+{
+	struct rio_dev *rdev, *swrdev;
+	struct rio_switch *rswitch;
+	u8 sport;
+	u16 destid;
+
+	list_for_each_entry(rdev, &net->devices, net_list) {
+
+		destid = rdev->destid;
+
+		list_for_each_entry(rswitch, &net->switches, node) {
+
+			if (rio_is_switch(rdev) && (rdev->rswitch == rswitch))
+				continue;
+
+			if (RIO_INVALID_ROUTE == rswitch->route_table[destid]) {
+				swrdev = sw_to_rio_dev(rswitch);
+
+				/* Skip if destid ends in empty switch*/
+				if (swrdev->destid == destid)
+					continue;
+
+				sport = RIO_GET_PORT_NUM(swrdev->swpinfo);
+
+				rio_route_add_entry(swrdev, RIO_GLOBAL_TABLE,
+						    destid, sport, 0);
+				rswitch->route_table[destid] = sport;
+			}
+		}
+	}
+}
+
+/**
+ * rio_init_em - Initializes RIO Error Management (for switches)
+ * @rdev: RIO device
+ *
+ * For each enumerated switch, call device-specific error management
+ * initialization routine (if supplied by the switch driver).
+ */
+static void rio_init_em(struct rio_dev *rdev)
+{
+	if (rio_is_switch(rdev) && (rdev->em_efptr) &&
+	    rdev->rswitch->ops && rdev->rswitch->ops->em_init) {
+		rdev->rswitch->ops->em_init(rdev);
+	}
+}
+
+/**
+ * rio_pw_enable - Enables/disables port-write handling by a master port
+ * @port: Master port associated with port-write handling
+ * @enable: 1=enable, 0=disable
+ */
+static void rio_pw_enable(struct rio_mport *port, int enable)
+{
+	if (port->ops->pwenable)
+		port->ops->pwenable(port, enable);
+}
+
+/**
+ * rio_enum_mport- Start enumeration through a master port
+ * @mport: Master port to send transactions
+ * @flags: Enumeration control flags
+ *
+ * Starts the enumeration process. If somebody has enumerated our
+ * master port device, then give up. If not and we have an active
+ * link, then start recursive peer enumeration. Returns %0 if
+ * enumeration succeeds or %-EBUSY if enumeration fails.
+ */
+static int rio_enum_mport(struct rio_mport *mport, u32 flags)
+{
+	struct rio_net *net = NULL;
+	int rc = 0;
+
+	printk(KERN_INFO "RIO: enumerate master port %d, %s\n", mport->id,
+	       mport->name);
+
+	/*
+	 * To avoid multiple start requests (repeat enumeration is not supported
+	 * by this method) check if enumeration/discovery was performed for this
+	 * mport: if the mport was already added into the list of mports for a
+	 * net, exit with error.
+	 */
+	if (mport->nnode.next || mport->nnode.prev)
+		return -EBUSY;
+
+	/* If somebody else enumerated our master port device, bail. */
+	if (rio_enum_host(mport) < 0) {
+		printk(KERN_INFO
+		       "RIO: master port %d device has been enumerated by a remote host\n",
+		       mport->id);
+		rc = -EBUSY;
+		goto out;
+	}
+
+	/* If master port has an active link, allocate net and enum peers */
+	if (rio_mport_is_active(mport)) {
+		net = rio_alloc_net(mport, 1, 0);
+		if (!net) {
+			printk(KERN_ERR "RIO: failed to allocate new net\n");
+			rc = -ENOMEM;
+			goto out;
+		}
+
+		/* reserve mport destID in new net */
+		rio_destid_reserve(net, mport->host_deviceid);
+
+		/* Enable Input Output Port (transmitter receiver) */
+		rio_enable_rx_tx_port(mport, 1, 0, 0, 0);
+
+		/* Set component tag for host */
+		rio_local_write_config_32(mport, RIO_COMPONENT_TAG_CSR,
+					  next_comptag++);
+
+		next_destid = rio_destid_alloc(net);
+
+		if (rio_enum_peer(net, mport, 0, NULL, 0) < 0) {
+			/* A higher priority host won enumeration, bail. */
+			printk(KERN_INFO
+			       "RIO: master port %d device has lost enumeration to a remote host\n",
+			       mport->id);
+			rio_clear_locks(net);
+			rc = -EBUSY;
+			goto out;
+		}
+		/* free the last allocated destID (unused) */
+		rio_destid_free(net, next_destid);
+		rio_update_route_tables(net);
+		rio_clear_locks(net);
+		rio_pw_enable(mport, 1);
+	} else {
+		printk(KERN_INFO "RIO: master port %d link inactive\n",
+		       mport->id);
+		rc = -EINVAL;
+	}
+
+out:
+	return rc;
+}
+
+/**
+ * rio_build_route_tables- Generate route tables from switch route entries
+ * @net: RIO network to run route tables scan on
+ *
+ * For each switch device, generate a route table by copying existing
+ * route entries from the switch.
+ */
+static void rio_build_route_tables(struct rio_net *net)
+{
+	struct rio_switch *rswitch;
+	struct rio_dev *rdev;
+	int i;
+	u8 sport;
+
+	list_for_each_entry(rswitch, &net->switches, node) {
+		rdev = sw_to_rio_dev(rswitch);
+
+		rio_lock_device(net->hport, rdev->destid,
+				rdev->hopcount, 1000);
+		for (i = 0;
+		     i < RIO_MAX_ROUTE_ENTRIES(net->hport->sys_size);
+		     i++) {
+			if (rio_route_get_entry(rdev, RIO_GLOBAL_TABLE,
+						i, &sport, 0) < 0)
+				continue;
+			rswitch->route_table[i] = sport;
+		}
+
+		rio_unlock_device(net->hport, rdev->destid, rdev->hopcount);
+	}
+}
+
+/**
+ * rio_disc_mport- Start discovery through a master port
+ * @mport: Master port to send transactions
+ * @flags: discovery control flags
+ *
+ * Starts the discovery process. If we have an active link,
+ * then wait for the signal that enumeration is complete (if wait
+ * is allowed).
+ * When enumeration completion is signaled, start recursive
+ * peer discovery. Returns %0 if discovery succeeds or %-EBUSY
+ * on failure.
+ */
+static int rio_disc_mport(struct rio_mport *mport, u32 flags)
+{
+	struct rio_net *net = NULL;
+	unsigned long to_end;
+
+	printk(KERN_INFO "RIO: discover master port %d, %s\n", mport->id,
+	       mport->name);
+
+	/* If master port has an active link, allocate net and discover peers */
+	if (rio_mport_is_active(mport)) {
+		if (rio_enum_complete(mport))
+			goto enum_done;
+		else if (flags & RIO_SCAN_ENUM_NO_WAIT)
+			return -EAGAIN;
+
+		pr_debug("RIO: wait for enumeration to complete...\n");
+
+		to_end = jiffies + CONFIG_RAPIDIO_DISC_TIMEOUT * HZ;
+		while (time_before(jiffies, to_end)) {
+			if (rio_enum_complete(mport))
+				goto enum_done;
+			msleep(10);
+		}
+
+		pr_debug("RIO: discovery timeout on mport %d %s\n",
+			 mport->id, mport->name);
+		goto bail;
+enum_done:
+		pr_debug("RIO: ...
enumeration done\n");
+
+		net = rio_alloc_net(mport, 0, 0);
+		if (!net) {
+			printk(KERN_ERR "RIO: Failed to allocate new net\n");
+			goto bail;
+		}
+
+		/* Read DestID assigned by enumerator */
+		rio_local_read_config_32(mport, RIO_DID_CSR,
+					 &mport->host_deviceid);
+		mport->host_deviceid = RIO_GET_DID(mport->sys_size,
+						   mport->host_deviceid);
+
+		if (rio_disc_peer(net, mport, RIO_ANY_DESTID(mport->sys_size),
+				  0, NULL, 0) < 0) {
+			printk(KERN_INFO
+			       "RIO: master port %d device has failed discovery\n",
+			       mport->id);
+			goto bail;
+		}
+
+		rio_build_route_tables(net);
+	}
+
+	return 0;
+bail:
+	return -EBUSY;
+}
+
+static struct rio_scan rio_scan_ops = {
+	.owner = THIS_MODULE,
+	.enumerate = rio_enum_mport,
+	.discover = rio_disc_mport,
+};
+
+static bool scan;
+module_param(scan, bool, 0);
+MODULE_PARM_DESC(scan, "Start RapidIO network enumeration/discovery "
+			"(default = 0)");
+
+/**
+ * rio_basic_attach:
+ *
+ * When this enumeration/discovery method is loaded as a module this function
+ * registers its specific enumeration and discovery routines for all available
+ * RapidIO mport devices. The "scan" command line parameter controls the
+ * ability of the module to start RapidIO enumeration/discovery automatically.
+ *
+ * Returns 0 for success or -EIO if unable to register itself.
+ *
+ * This enumeration/discovery method cannot be unloaded and therefore does not
+ * provide a matching cleanup_module routine.
+ */
+
+static int __init rio_basic_attach(void)
+{
+	if (rio_register_scan(RIO_MPORT_ANY, &rio_scan_ops))
+		return -EIO;
+	if (scan)
+		rio_init_mports();
+	return 0;
+}
+
+late_initcall(rio_basic_attach);
+
+MODULE_DESCRIPTION("Basic RapidIO enumeration/discovery");
+MODULE_LICENSE("GPL");
diff --git a/drivers/rapidio/rio-sysfs.c b/drivers/rapidio/rio-sysfs.c
new file mode 100644
index 000000000..cdb005c00
--- /dev/null
+++ b/drivers/rapidio/rio-sysfs.c
@@ -0,0 +1,383 @@
+/*
+ * RapidIO sysfs attributes and support
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */ + +#include +#include +#include +#include +#include + +#include "rio.h" + +/* Sysfs support */ +#define rio_config_attr(field, format_string) \ +static ssize_t \ +field##_show(struct device *dev, struct device_attribute *attr, char *buf) \ +{ \ + struct rio_dev *rdev = to_rio_dev(dev); \ + \ + return sprintf(buf, format_string, rdev->field); \ +} \ +static DEVICE_ATTR_RO(field); + +rio_config_attr(did, "0x%04x\n"); +rio_config_attr(vid, "0x%04x\n"); +rio_config_attr(device_rev, "0x%08x\n"); +rio_config_attr(asm_did, "0x%04x\n"); +rio_config_attr(asm_vid, "0x%04x\n"); +rio_config_attr(asm_rev, "0x%04x\n"); +rio_config_attr(destid, "0x%04x\n"); +rio_config_attr(hopcount, "0x%02x\n"); + +static ssize_t routes_show(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct rio_dev *rdev = to_rio_dev(dev); + char *str = buf; + int i; + + for (i = 0; i < RIO_MAX_ROUTE_ENTRIES(rdev->net->hport->sys_size); + i++) { + if (rdev->rswitch->route_table[i] == RIO_INVALID_ROUTE) + continue; + str += + sprintf(str, "%04x %02x\n", i, + rdev->rswitch->route_table[i]); + } + + return (str - buf); +} +static DEVICE_ATTR_RO(routes); + +static ssize_t lprev_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rio_dev *rdev = to_rio_dev(dev); + + return sprintf(buf, "%s\n", + (rdev->prev) ? rio_name(rdev->prev) : "root"); +} +static DEVICE_ATTR_RO(lprev); + +static ssize_t lnext_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rio_dev *rdev = to_rio_dev(dev); + char *str = buf; + int i; + + if (rdev->pef & RIO_PEF_SWITCH) { + for (i = 0; i < RIO_GET_TOTAL_PORTS(rdev->swpinfo); i++) { + if (rdev->rswitch->nextdev[i]) + str += sprintf(str, "%s\n", + rio_name(rdev->rswitch->nextdev[i])); + else + str += sprintf(str, "null\n"); + } + } + + return str - buf; +} +static DEVICE_ATTR_RO(lnext); + +static ssize_t modalias_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct rio_dev *rdev = to_rio_dev(dev); + + return sprintf(buf, "rapidio:v%04Xd%04Xav%04Xad%04X\n", + rdev->vid, rdev->did, rdev->asm_vid, rdev->asm_did); +} +static DEVICE_ATTR_RO(modalias); + +static struct attribute *rio_dev_attrs[] = { + &dev_attr_did.attr, + &dev_attr_vid.attr, + &dev_attr_device_rev.attr, + &dev_attr_asm_did.attr, + &dev_attr_asm_vid.attr, + &dev_attr_asm_rev.attr, + &dev_attr_lprev.attr, + &dev_attr_destid.attr, + &dev_attr_modalias.attr, + NULL, +}; + +static const struct attribute_group rio_dev_group = { + .attrs = rio_dev_attrs, +}; + +const struct attribute_group *rio_dev_groups[] = { + &rio_dev_group, + NULL, +}; + +static ssize_t +rio_read_config(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct rio_dev *dev = + to_rio_dev(container_of(kobj, struct device, kobj)); + unsigned int size = 0x100; + loff_t init_off = off; + u8 *data = (u8 *) buf; + + /* Several chips lock up trying to read undefined config space */ + if (capable(CAP_SYS_ADMIN)) + size = RIO_MAINT_SPACE_SZ; + + if (off >= size) + return 0; + if (off + count > size) { + size -= off; + count = size; + } else { + size = count; + } + + if ((off & 1) && size) { + u8 val; + rio_read_config_8(dev, off, &val); + data[off - init_off] = val; + off++; + size--; + } + + if ((off & 3) && size > 2) { + u16 val; + rio_read_config_16(dev, off, &val); + data[off - init_off] = (val >> 8) & 0xff; + data[off - init_off + 1] = val & 0xff; + off += 2; + size -= 2; + } + + while (size > 3) { + u32 val; + 
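+		/* unpack the aligned 32-bit word MSB-first into the buffer */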
rio_read_config_32(dev, off, &val); + data[off - init_off] = (val >> 24) & 0xff; + data[off - init_off + 1] = (val >> 16) & 0xff; + data[off - init_off + 2] = (val >> 8) & 0xff; + data[off - init_off + 3] = val & 0xff; + off += 4; + size -= 4; + } + + if (size >= 2) { + u16 val; + rio_read_config_16(dev, off, &val); + data[off - init_off] = (val >> 8) & 0xff; + data[off - init_off + 1] = val & 0xff; + off += 2; + size -= 2; + } + + if (size > 0) { + u8 val; + rio_read_config_8(dev, off, &val); + data[off - init_off] = val; + off++; + --size; + } + + return count; +} + +static ssize_t +rio_write_config(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + char *buf, loff_t off, size_t count) +{ + struct rio_dev *dev = + to_rio_dev(container_of(kobj, struct device, kobj)); + unsigned int size = count; + loff_t init_off = off; + u8 *data = (u8 *) buf; + + if (off >= RIO_MAINT_SPACE_SZ) + return 0; + if (off + count > RIO_MAINT_SPACE_SZ) { + size = RIO_MAINT_SPACE_SZ - off; + count = size; + } + + if ((off & 1) && size) { + rio_write_config_8(dev, off, data[off - init_off]); + off++; + size--; + } + + if ((off & 3) && (size > 2)) { + u16 val = data[off - init_off + 1]; + val |= (u16) data[off - init_off] << 8; + rio_write_config_16(dev, off, val); + off += 2; + size -= 2; + } + + while (size > 3) { + u32 val = data[off - init_off + 3]; + val |= (u32) data[off - init_off + 2] << 8; + val |= (u32) data[off - init_off + 1] << 16; + val |= (u32) data[off - init_off] << 24; + rio_write_config_32(dev, off, val); + off += 4; + size -= 4; + } + + if (size >= 2) { + u16 val = data[off - init_off + 1]; + val |= (u16) data[off - init_off] << 8; + rio_write_config_16(dev, off, val); + off += 2; + size -= 2; + } + + if (size) { + rio_write_config_8(dev, off, data[off - init_off]); + off++; + --size; + } + + return count; +} + +static struct bin_attribute rio_config_attr = { + .attr = { + .name = "config", + .mode = S_IRUGO | S_IWUSR, + }, + .size = RIO_MAINT_SPACE_SZ, + .read = rio_read_config, + .write = rio_write_config, +}; + +/** + * rio_create_sysfs_dev_files - create RIO specific sysfs files + * @rdev: device whose entries should be created + * + * Create files when @rdev is added to sysfs. + */ +int rio_create_sysfs_dev_files(struct rio_dev *rdev) +{ + int err = 0; + + err = device_create_bin_file(&rdev->dev, &rio_config_attr); + + if (!err && (rdev->pef & RIO_PEF_SWITCH)) { + err |= device_create_file(&rdev->dev, &dev_attr_routes); + err |= device_create_file(&rdev->dev, &dev_attr_lnext); + err |= device_create_file(&rdev->dev, &dev_attr_hopcount); + } + + if (err) + pr_warning("RIO: Failed to create attribute file(s) for %s\n", + rio_name(rdev)); + + return err; +} + +/** + * rio_remove_sysfs_dev_files - cleanup RIO specific sysfs files + * @rdev: device whose entries we should free + * + * Cleanup when @rdev is removed from sysfs. 
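+ *
+ * A minimal sketch of the intended pairing (illustrative only; error
+ * handling omitted): rio_create_sysfs_dev_files() is called when @rdev
+ * is added, see rio_add_device() in rio.c, and this routine undoes it:
+ *
+ *	err = rio_create_sysfs_dev_files(rdev);
+ *	...
+ *	rio_remove_sysfs_dev_files(rdev);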
+ */
+void rio_remove_sysfs_dev_files(struct rio_dev *rdev)
+{
+	device_remove_bin_file(&rdev->dev, &rio_config_attr);
+	if (rdev->pef & RIO_PEF_SWITCH) {
+		device_remove_file(&rdev->dev, &dev_attr_routes);
+		device_remove_file(&rdev->dev, &dev_attr_lnext);
+		device_remove_file(&rdev->dev, &dev_attr_hopcount);
+	}
+}
+
+static ssize_t bus_scan_store(struct bus_type *bus, const char *buf,
+				size_t count)
+{
+	long val;
+	int rc;
+
+	if (kstrtol(buf, 0, &val) < 0)
+		return -EINVAL;
+
+	if (val == RIO_MPORT_ANY) {
+		rc = rio_init_mports();
+		goto exit;
+	}
+
+	if (val < 0 || val >= RIO_MAX_MPORTS)
+		return -EINVAL;
+
+	rc = rio_mport_scan((int)val);
+exit:
+	if (!rc)
+		rc = count;
+
+	return rc;
+}
+static BUS_ATTR(scan, (S_IWUSR|S_IWGRP), NULL, bus_scan_store);
+
+static struct attribute *rio_bus_attrs[] = {
+	&bus_attr_scan.attr,
+	NULL,
+};
+
+static const struct attribute_group rio_bus_group = {
+	.attrs = rio_bus_attrs,
+};
+
+const struct attribute_group *rio_bus_groups[] = {
+	&rio_bus_group,
+	NULL,
+};
+
+static ssize_t
+port_destid_show(struct device *dev, struct device_attribute *attr,
+		 char *buf)
+{
+	struct rio_mport *mport = to_rio_mport(dev);
+
+	if (mport)
+		return sprintf(buf, "0x%04x\n", mport->host_deviceid);
+	else
+		return -ENODEV;
+}
+static DEVICE_ATTR_RO(port_destid);
+
+static ssize_t sys_size_show(struct device *dev, struct device_attribute *attr,
+			   char *buf)
+{
+	struct rio_mport *mport = to_rio_mport(dev);
+
+	if (mport)
+		return sprintf(buf, "%u\n", mport->sys_size);
+	else
+		return -ENODEV;
+}
+static DEVICE_ATTR_RO(sys_size);
+
+static struct attribute *rio_mport_attrs[] = {
+	&dev_attr_port_destid.attr,
+	&dev_attr_sys_size.attr,
+	NULL,
+};
+
+static const struct attribute_group rio_mport_group = {
+	.attrs = rio_mport_attrs,
+};
+
+const struct attribute_group *rio_mport_groups[] = {
+	&rio_mport_group,
+	NULL,
+};
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
new file mode 100644
index 000000000..d7b87c64b
--- /dev/null
+++ b/drivers/rapidio/rio.c
@@ -0,0 +1,1969 @@
+/*
+ * RapidIO interconnect services
+ * (RapidIO Interconnect Specification, http://www.rapidio.org)
+ *
+ * Copyright 2005 MontaVista Software, Inc.
+ * Matt Porter <mporter@kernel.crashing.org>
+ *
+ * Copyright 2009 - 2013 Integrated Device Technology, Inc.
+ * Alex Bounine <alexandre.bounine@idt.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/rio_regs.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+
+#include "rio.h"
+
+MODULE_DESCRIPTION("RapidIO Subsystem Core");
+MODULE_AUTHOR("Matt Porter <mporter@kernel.crashing.org>");
+MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
+MODULE_LICENSE("GPL");
+
+static int hdid[RIO_MAX_MPORTS];
+static int ids_num;
+module_param_array(hdid, int, &ids_num, 0);
+MODULE_PARM_DESC(hdid,
+	"Destination ID assignment to local RapidIO controllers");
+
+static LIST_HEAD(rio_devices);
+static DEFINE_SPINLOCK(rio_global_list_lock);
+
+static LIST_HEAD(rio_mports);
+static LIST_HEAD(rio_scans);
+static DEFINE_MUTEX(rio_mport_list_lock);
+static unsigned char next_portid;
+static DEFINE_SPINLOCK(rio_mmap_lock);
+
+/**
+ * rio_local_get_device_id - Get the base/extended device id for a port
+ * @port: RIO master port from which to get the deviceid
+ *
+ * Reads the base/extended device id from the local device
+ * implementing the master port. Returns the 8/16-bit device
+ * id.
+ */
+u16 rio_local_get_device_id(struct rio_mport *port)
+{
+	u32 result;
+
+	rio_local_read_config_32(port, RIO_DID_CSR, &result);
+
+	return (RIO_GET_DID(port->sys_size, result));
+}
+
+/**
+ * rio_add_device - Adds a RIO device to the device model
+ * @rdev: RIO device
+ *
+ * Adds the RIO device to the global device list and adds the RIO
+ * device to the RIO device list. Creates the generic sysfs nodes
+ * for a RIO device.
+ */
+int rio_add_device(struct rio_dev *rdev)
+{
+	int err;
+
+	err = device_add(&rdev->dev);
+	if (err)
+		return err;
+
+	spin_lock(&rio_global_list_lock);
+	list_add_tail(&rdev->global_list, &rio_devices);
+	spin_unlock(&rio_global_list_lock);
+
+	rio_create_sysfs_dev_files(rdev);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rio_add_device);
+
+/**
+ * rio_request_inb_mbox - request inbound mailbox service
+ * @mport: RIO master port from which to allocate the mailbox resource
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox number to claim
+ * @entries: Number of entries in inbound mailbox queue
+ * @minb: Callback to execute when inbound message is received
+ *
+ * Requests ownership of an inbound mailbox resource and binds
+ * a callback function to the resource. Returns %0 on success.
+ */
+int rio_request_inb_mbox(struct rio_mport *mport,
+			 void *dev_id,
+			 int mbox,
+			 int entries,
+			 void (*minb) (struct rio_mport * mport, void *dev_id, int mbox,
+				       int slot))
+{
+	int rc = -ENOSYS;
+	struct resource *res;
+
+	if (mport->ops->open_inb_mbox == NULL)
+		goto out;
+
+	res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+
+	if (res) {
+		rio_init_mbox_res(res, mbox, mbox);
+
+		/* Make sure this mailbox isn't in use */
+		if ((rc =
+		     request_resource(&mport->riores[RIO_INB_MBOX_RESOURCE],
+				      res)) < 0) {
+			kfree(res);
+			goto out;
+		}
+
+		mport->inb_msg[mbox].res = res;
+
+		/* Hook the inbound message callback */
+		mport->inb_msg[mbox].mcback = minb;
+
+		rc = mport->ops->open_inb_mbox(mport, dev_id, mbox, entries);
+	} else
+		rc = -ENOMEM;
+
+      out:
+	return rc;
+}
+
+/**
+ * rio_release_inb_mbox - release inbound mailbox message service
+ * @mport: RIO master port from which to release the mailbox resource
+ * @mbox: Mailbox number to release
+ *
+ * Releases ownership of an inbound mailbox resource. Returns 0
+ * if the request has been satisfied.
+ */
+int rio_release_inb_mbox(struct rio_mport *mport, int mbox)
+{
+	if (mport->ops->close_inb_mbox) {
+		mport->ops->close_inb_mbox(mport, mbox);
+
+		/* Release the mailbox resource */
+		return release_resource(mport->inb_msg[mbox].res);
+	} else
+		return -ENOSYS;
+}
+
+/**
+ * rio_request_outb_mbox - request outbound mailbox service
+ * @mport: RIO master port from which to allocate the mailbox resource
+ * @dev_id: Device specific pointer to pass on event
+ * @mbox: Mailbox number to claim
+ * @entries: Number of entries in outbound mailbox queue
+ * @moutb: Callback to execute when outbound message is sent
+ *
+ * Requests ownership of an outbound mailbox resource and binds
+ * a callback function to the resource. Returns 0 on success.
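+ *
+ * For illustration, a hypothetical caller (names below are illustrative,
+ * not part of this API) could claim outbound mailbox 0 with a 16-entry
+ * queue as follows:
+ *
+ *	static void my_outb_done(struct rio_mport *mport, void *dev_id,
+ *				 int mbox, int slot)
+ *	{
+ *		pr_debug("mbox %d slot %d sent\n", mbox, slot);
+ *	}
+ *
+ *	rc = rio_request_outb_mbox(mport, my_dev, 0, 16, my_outb_done);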
+ */
+int rio_request_outb_mbox(struct rio_mport *mport,
+			  void *dev_id,
+			  int mbox,
+			  int entries,
+			  void (*moutb) (struct rio_mport * mport, void *dev_id, int mbox, int slot))
+{
+	int rc = -ENOSYS;
+	struct resource *res;
+
+	if (mport->ops->open_outb_mbox == NULL)
+		goto out;
+
+	res = kmalloc(sizeof(struct resource), GFP_KERNEL);
+
+	if (res) {
+		rio_init_mbox_res(res, mbox, mbox);
+
+		/* Make sure this outbound mailbox isn't in use */
+		if ((rc =
+		     request_resource(&mport->riores[RIO_OUTB_MBOX_RESOURCE],
+				      res)) < 0) {
+			kfree(res);
+			goto out;
+		}
+
+		mport->outb_msg[mbox].res = res;
+
+		/* Hook the outbound message callback */
+		mport->outb_msg[mbox].mcback = moutb;
+
+		rc = mport->ops->open_outb_mbox(mport, dev_id, mbox, entries);
+	} else
+		rc = -ENOMEM;
+
+      out:
+	return rc;
+}
+
+/**
+ * rio_release_outb_mbox - release outbound mailbox message service
+ * @mport: RIO master port from which to release the mailbox resource
+ * @mbox: Mailbox number to release
+ *
+ * Releases ownership of an outbound mailbox resource. Returns 0
+ * if the request has been satisfied.
+ */
+int rio_release_outb_mbox(struct rio_mport *mport, int mbox)
+{
+	if (mport->ops->close_outb_mbox) {
+		mport->ops->close_outb_mbox(mport, mbox);
+
+		/* Release the mailbox resource */
+		return release_resource(mport->outb_msg[mbox].res);
+	} else
+		return -ENOSYS;
+}
+
+/**
+ * rio_setup_inb_dbell - bind inbound doorbell callback
+ * @mport: RIO master port to bind the doorbell callback
+ * @dev_id: Device specific pointer to pass on event
+ * @res: Doorbell message resource
+ * @dinb: Callback to execute when doorbell is received
+ *
+ * Adds a doorbell resource/callback pair into a port's
+ * doorbell event list. Returns 0 if the request has been
+ * satisfied.
+ */
+static int
+rio_setup_inb_dbell(struct rio_mport *mport, void *dev_id, struct resource *res,
+		    void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, u16 dst,
+				  u16 info))
+{
+	int rc = 0;
+	struct rio_dbell *dbell;
+
+	if (!(dbell = kmalloc(sizeof(struct rio_dbell), GFP_KERNEL))) {
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	dbell->res = res;
+	dbell->dinb = dinb;
+	dbell->dev_id = dev_id;
+
+	list_add_tail(&dbell->node, &mport->dbells);
+
+      out:
+	return rc;
+}
+
+/**
+ * rio_request_inb_dbell - request inbound doorbell message service
+ * @mport: RIO master port from which to allocate the doorbell resource
+ * @dev_id: Device specific pointer to pass on event
+ * @start: Doorbell info range start
+ * @end: Doorbell info range end
+ * @dinb: Callback to execute when doorbell is received
+ *
+ * Requests ownership of an inbound doorbell resource and binds
+ * a callback function to the resource. Returns 0 if the request
+ * has been satisfied.
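+ *
+ * As an illustrative sketch (hypothetical names), a driver could watch
+ * doorbell info values 0x0000-0x00ff:
+ *
+ *	static void my_dbell(struct rio_mport *mport, void *dev_id,
+ *			     u16 src, u16 dst, u16 info)
+ *	{
+ *		pr_debug("doorbell 0x%04x from destid 0x%04x\n", info, src);
+ *	}
+ *
+ *	rc = rio_request_inb_dbell(mport, my_dev, 0x0000, 0x00ff, my_dbell);
+ *
+ * The matching rio_release_inb_dbell(mport, 0x0000, 0x00ff) must use the
+ * same range, since release requires an exact start/end match.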
+ */ +int rio_request_inb_dbell(struct rio_mport *mport, + void *dev_id, + u16 start, + u16 end, + void (*dinb) (struct rio_mport * mport, void *dev_id, u16 src, + u16 dst, u16 info)) +{ + int rc = 0; + + struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL); + + if (res) { + rio_init_dbell_res(res, start, end); + + /* Make sure these doorbells aren't in use */ + if ((rc = + request_resource(&mport->riores[RIO_DOORBELL_RESOURCE], + res)) < 0) { + kfree(res); + goto out; + } + + /* Hook the doorbell callback */ + rc = rio_setup_inb_dbell(mport, dev_id, res, dinb); + } else + rc = -ENOMEM; + + out: + return rc; +} + +/** + * rio_release_inb_dbell - release inbound doorbell message service + * @mport: RIO master port from which to release the doorbell resource + * @start: Doorbell info range start + * @end: Doorbell info range end + * + * Releases ownership of an inbound doorbell resource and removes + * callback from the doorbell event list. Returns 0 if the request + * has been satisfied. + */ +int rio_release_inb_dbell(struct rio_mport *mport, u16 start, u16 end) +{ + int rc = 0, found = 0; + struct rio_dbell *dbell; + + list_for_each_entry(dbell, &mport->dbells, node) { + if ((dbell->res->start == start) && (dbell->res->end == end)) { + found = 1; + break; + } + } + + /* If we can't find an exact match, fail */ + if (!found) { + rc = -EINVAL; + goto out; + } + + /* Delete from list */ + list_del(&dbell->node); + + /* Release the doorbell resource */ + rc = release_resource(dbell->res); + + /* Free the doorbell event */ + kfree(dbell); + + out: + return rc; +} + +/** + * rio_request_outb_dbell - request outbound doorbell message range + * @rdev: RIO device from which to allocate the doorbell resource + * @start: Doorbell message range start + * @end: Doorbell message range end + * + * Requests ownership of a doorbell message range. Returns a resource + * if the request has been satisfied or %NULL on failure. + */ +struct resource *rio_request_outb_dbell(struct rio_dev *rdev, u16 start, + u16 end) +{ + struct resource *res = kmalloc(sizeof(struct resource), GFP_KERNEL); + + if (res) { + rio_init_dbell_res(res, start, end); + + /* Make sure these doorbells aren't in use */ + if (request_resource(&rdev->riores[RIO_DOORBELL_RESOURCE], res) + < 0) { + kfree(res); + res = NULL; + } + } + + return res; +} + +/** + * rio_release_outb_dbell - release outbound doorbell message range + * @rdev: RIO device from which to release the doorbell resource + * @res: Doorbell resource to be freed + * + * Releases ownership of a doorbell message range. Returns 0 if the + * request has been satisfied. + */ +int rio_release_outb_dbell(struct rio_dev *rdev, struct resource *res) +{ + int rc = release_resource(res); + + kfree(res); + + return rc; +} + +/** + * rio_request_inb_pwrite - request inbound port-write message service + * @rdev: RIO device to which register inbound port-write callback routine + * @pwcback: Callback routine to execute when port-write is received + * + * Binds a port-write callback function to the RapidIO device. + * Returns 0 if the request has been satisfied. 
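+ *
+ * Only one callback can be bound per device; if @rdev already has a
+ * port-write callback installed, this function fails. An illustrative
+ * registration (hypothetical handler name):
+ *
+ *	static int my_pw_handler(struct rio_dev *rdev, union rio_pw_msg *msg,
+ *				 int step)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	rc = rio_request_inb_pwrite(rdev, my_pw_handler);
+ *
+ * Returning 0 from the callback tells rio_inb_pwrite_handler() that no
+ * further standard processing is required for the event.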
+ */ +int rio_request_inb_pwrite(struct rio_dev *rdev, + int (*pwcback)(struct rio_dev *rdev, union rio_pw_msg *msg, int step)) +{ + int rc = 0; + + spin_lock(&rio_global_list_lock); + if (rdev->pwcback != NULL) + rc = -ENOMEM; + else + rdev->pwcback = pwcback; + + spin_unlock(&rio_global_list_lock); + return rc; +} +EXPORT_SYMBOL_GPL(rio_request_inb_pwrite); + +/** + * rio_release_inb_pwrite - release inbound port-write message service + * @rdev: RIO device which registered for inbound port-write callback + * + * Removes callback from the rio_dev structure. Returns 0 if the request + * has been satisfied. + */ +int rio_release_inb_pwrite(struct rio_dev *rdev) +{ + int rc = -ENOMEM; + + spin_lock(&rio_global_list_lock); + if (rdev->pwcback) { + rdev->pwcback = NULL; + rc = 0; + } + + spin_unlock(&rio_global_list_lock); + return rc; +} +EXPORT_SYMBOL_GPL(rio_release_inb_pwrite); + +/** + * rio_map_inb_region -- Map inbound memory region. + * @mport: Master port. + * @local: physical address of memory region to be mapped + * @rbase: RIO base address assigned to this window + * @size: Size of the memory region + * @rflags: Flags for mapping. + * + * Return: 0 -- Success. + * + * This function will create the mapping from RIO space to local memory. + */ +int rio_map_inb_region(struct rio_mport *mport, dma_addr_t local, + u64 rbase, u32 size, u32 rflags) +{ + int rc = 0; + unsigned long flags; + + if (!mport->ops->map_inb) + return -1; + spin_lock_irqsave(&rio_mmap_lock, flags); + rc = mport->ops->map_inb(mport, local, rbase, size, rflags); + spin_unlock_irqrestore(&rio_mmap_lock, flags); + return rc; +} +EXPORT_SYMBOL_GPL(rio_map_inb_region); + +/** + * rio_unmap_inb_region -- Unmap the inbound memory region + * @mport: Master port + * @lstart: physical address of memory region to be unmapped + */ +void rio_unmap_inb_region(struct rio_mport *mport, dma_addr_t lstart) +{ + unsigned long flags; + if (!mport->ops->unmap_inb) + return; + spin_lock_irqsave(&rio_mmap_lock, flags); + mport->ops->unmap_inb(mport, lstart); + spin_unlock_irqrestore(&rio_mmap_lock, flags); +} +EXPORT_SYMBOL_GPL(rio_unmap_inb_region); + +/** + * rio_mport_get_physefb - Helper function that returns register offset + * for Physical Layer Extended Features Block. 
+ * @port: Master port to issue transaction + * @local: Indicate a local master port or remote device access + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + */ +u32 +rio_mport_get_physefb(struct rio_mport *port, int local, + u16 destid, u8 hopcount) +{ + u32 ext_ftr_ptr; + u32 ftr_header; + + ext_ftr_ptr = rio_mport_get_efb(port, local, destid, hopcount, 0); + + while (ext_ftr_ptr) { + if (local) + rio_local_read_config_32(port, ext_ftr_ptr, + &ftr_header); + else + rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr, &ftr_header); + + ftr_header = RIO_GET_BLOCK_ID(ftr_header); + switch (ftr_header) { + + case RIO_EFB_SER_EP_ID_V13P: + case RIO_EFB_SER_EP_REC_ID_V13P: + case RIO_EFB_SER_EP_FREE_ID_V13P: + case RIO_EFB_SER_EP_ID: + case RIO_EFB_SER_EP_REC_ID: + case RIO_EFB_SER_EP_FREE_ID: + case RIO_EFB_SER_EP_FREC_ID: + + return ext_ftr_ptr; + + default: + break; + } + + ext_ftr_ptr = rio_mport_get_efb(port, local, destid, + hopcount, ext_ftr_ptr); + } + + return ext_ftr_ptr; +} +EXPORT_SYMBOL_GPL(rio_mport_get_physefb); + +/** + * rio_get_comptag - Begin or continue searching for a RIO device by component tag + * @comp_tag: RIO component tag to match + * @from: Previous RIO device found in search, or %NULL for new search + * + * Iterates through the list of known RIO devices. If a RIO device is + * found with a matching @comp_tag, a pointer to its device + * structure is returned. Otherwise, %NULL is returned. A new search + * is initiated by passing %NULL to the @from argument. Otherwise, if + * @from is not %NULL, searches continue from next device on the global + * list. + */ +struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from) +{ + struct list_head *n; + struct rio_dev *rdev; + + spin_lock(&rio_global_list_lock); + n = from ? from->global_list.next : rio_devices.next; + + while (n && (n != &rio_devices)) { + rdev = rio_dev_g(n); + if (rdev->comp_tag == comp_tag) + goto exit; + n = n->next; + } + rdev = NULL; +exit: + spin_unlock(&rio_global_list_lock); + return rdev; +} +EXPORT_SYMBOL_GPL(rio_get_comptag); + +/** + * rio_set_port_lockout - Sets/clears LOCKOUT bit (RIO EM 1.3) for a switch port. 
+ * @rdev: Pointer to RIO device control structure + * @pnum: Switch port number to set LOCKOUT bit + * @lock: Operation : set (=1) or clear (=0) + */ +int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock) +{ + u32 regval; + + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), + ®val); + if (lock) + regval |= RIO_PORT_N_CTL_LOCKOUT; + else + regval &= ~RIO_PORT_N_CTL_LOCKOUT; + + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(pnum), + regval); + return 0; +} +EXPORT_SYMBOL_GPL(rio_set_port_lockout); + +/** + * rio_enable_rx_tx_port - enable input receiver and output transmitter of + * given port + * @port: Master port associated with the RIO network + * @local: local=1 select local port otherwise a far device is reached + * @destid: Destination ID of the device to check host bit + * @hopcount: Number of hops to reach the target + * @port_num: Port (-number on switch) to enable on a far end device + * + * Returns 0 or 1 from on General Control Command and Status Register + * (EXT_PTR+0x3C) + */ +int rio_enable_rx_tx_port(struct rio_mport *port, + int local, u16 destid, + u8 hopcount, u8 port_num) +{ +#ifdef CONFIG_RAPIDIO_ENABLE_RX_TX_PORTS + u32 regval; + u32 ext_ftr_ptr; + + /* + * enable rx input tx output port + */ + pr_debug("rio_enable_rx_tx_port(local = %d, destid = %d, hopcount = " + "%d, port_num = %d)\n", local, destid, hopcount, port_num); + + ext_ftr_ptr = rio_mport_get_physefb(port, local, destid, hopcount); + + if (local) { + rio_local_read_config_32(port, ext_ftr_ptr + + RIO_PORT_N_CTL_CSR(0), + ®val); + } else { + if (rio_mport_read_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), ®val) < 0) + return -EIO; + } + + if (regval & RIO_PORT_N_CTL_P_TYP_SER) { + /* serial */ + regval = regval | RIO_PORT_N_CTL_EN_RX_SER + | RIO_PORT_N_CTL_EN_TX_SER; + } else { + /* parallel */ + regval = regval | RIO_PORT_N_CTL_EN_RX_PAR + | RIO_PORT_N_CTL_EN_TX_PAR; + } + + if (local) { + rio_local_write_config_32(port, ext_ftr_ptr + + RIO_PORT_N_CTL_CSR(0), regval); + } else { + if (rio_mport_write_config_32(port, destid, hopcount, + ext_ftr_ptr + RIO_PORT_N_CTL_CSR(port_num), regval) < 0) + return -EIO; + } +#endif + return 0; +} +EXPORT_SYMBOL_GPL(rio_enable_rx_tx_port); + + +/** + * rio_chk_dev_route - Validate route to the specified device. + * @rdev: RIO device failed to respond + * @nrdev: Last active device on the route to rdev + * @npnum: nrdev's port number on the route to rdev + * + * Follows a route to the specified RIO device to determine the last available + * device (and corresponding RIO port) on the route. + */ +static int +rio_chk_dev_route(struct rio_dev *rdev, struct rio_dev **nrdev, int *npnum) +{ + u32 result; + int p_port, rc = -EIO; + struct rio_dev *prev = NULL; + + /* Find switch with failed RIO link */ + while (rdev->prev && (rdev->prev->pef & RIO_PEF_SWITCH)) { + if (!rio_read_config_32(rdev->prev, RIO_DEV_ID_CAR, &result)) { + prev = rdev->prev; + break; + } + rdev = rdev->prev; + } + + if (prev == NULL) + goto err_out; + + p_port = prev->rswitch->route_table[rdev->destid]; + + if (p_port != RIO_INVALID_ROUTE) { + pr_debug("RIO: link failed on [%s]-P%d\n", + rio_name(prev), p_port); + *nrdev = prev; + *npnum = p_port; + rc = 0; + } else + pr_debug("RIO: failed to trace route to %s\n", rio_name(rdev)); +err_out: + return rc; +} + +/** + * rio_mport_chk_dev_access - Validate access to the specified device. 
+ * @mport: Master port to send transactions + * @destid: Device destination ID in network + * @hopcount: Number of hops into the network + */ +int +rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid, u8 hopcount) +{ + int i = 0; + u32 tmp; + + while (rio_mport_read_config_32(mport, destid, hopcount, + RIO_DEV_ID_CAR, &tmp)) { + i++; + if (i == RIO_MAX_CHK_RETRY) + return -EIO; + mdelay(1); + } + + return 0; +} +EXPORT_SYMBOL_GPL(rio_mport_chk_dev_access); + +/** + * rio_chk_dev_access - Validate access to the specified device. + * @rdev: Pointer to RIO device control structure + */ +static int rio_chk_dev_access(struct rio_dev *rdev) +{ + return rio_mport_chk_dev_access(rdev->net->hport, + rdev->destid, rdev->hopcount); +} + +/** + * rio_get_input_status - Sends a Link-Request/Input-Status control symbol and + * returns link-response (if requested). + * @rdev: RIO devive to issue Input-status command + * @pnum: Device port number to issue the command + * @lnkresp: Response from a link partner + */ +static int +rio_get_input_status(struct rio_dev *rdev, int pnum, u32 *lnkresp) +{ + u32 regval; + int checkcount; + + if (lnkresp) { + /* Read from link maintenance response register + * to clear valid bit */ + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum), + ®val); + udelay(50); + } + + /* Issue Input-status command */ + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_MNT_REQ_CSR(pnum), + RIO_MNT_REQ_CMD_IS); + + /* Exit if the response is not expected */ + if (lnkresp == NULL) + return 0; + + checkcount = 3; + while (checkcount--) { + udelay(50); + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(pnum), + ®val); + if (regval & RIO_PORT_N_MNT_RSP_RVAL) { + *lnkresp = regval; + return 0; + } + } + + return -EIO; +} + +/** + * rio_clr_err_stopped - Clears port Error-stopped states. + * @rdev: Pointer to RIO device control structure + * @pnum: Switch port number to clear errors + * @err_status: port error status (if 0 reads register from device) + */ +static int rio_clr_err_stopped(struct rio_dev *rdev, u32 pnum, u32 err_status) +{ + struct rio_dev *nextdev = rdev->rswitch->nextdev[pnum]; + u32 regval; + u32 far_ackid, far_linkstat, near_ackid; + + if (err_status == 0) + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), + &err_status); + + if (err_status & RIO_PORT_N_ERR_STS_PW_OUT_ES) { + pr_debug("RIO_EM: servicing Output Error-Stopped state\n"); + /* + * Send a Link-Request/Input-Status control symbol + */ + if (rio_get_input_status(rdev, pnum, ®val)) { + pr_debug("RIO_EM: Input-status response timeout\n"); + goto rd_err; + } + + pr_debug("RIO_EM: SP%d Input-status response=0x%08x\n", + pnum, regval); + far_ackid = (regval & RIO_PORT_N_MNT_RSP_ASTAT) >> 5; + far_linkstat = regval & RIO_PORT_N_MNT_RSP_LSTAT; + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum), + ®val); + pr_debug("RIO_EM: SP%d_ACK_STS_CSR=0x%08x\n", pnum, regval); + near_ackid = (regval & RIO_PORT_N_ACK_INBOUND) >> 24; + pr_debug("RIO_EM: SP%d far_ackID=0x%02x far_linkstat=0x%02x" \ + " near_ackID=0x%02x\n", + pnum, far_ackid, far_linkstat, near_ackid); + + /* + * If required, synchronize ackIDs of near and + * far sides. + */ + if ((far_ackid != ((regval & RIO_PORT_N_ACK_OUTSTAND) >> 8)) || + (far_ackid != (regval & RIO_PORT_N_ACK_OUTBOUND))) { + /* Align near outstanding/outbound ackIDs with + * far inbound. 
+ */ + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_ACK_STS_CSR(pnum), + (near_ackid << 24) | + (far_ackid << 8) | far_ackid); + /* Align far outstanding/outbound ackIDs with + * near inbound. + */ + far_ackid++; + if (nextdev) + rio_write_config_32(nextdev, + nextdev->phys_efptr + + RIO_PORT_N_ACK_STS_CSR(RIO_GET_PORT_NUM(nextdev->swpinfo)), + (far_ackid << 24) | + (near_ackid << 8) | near_ackid); + else + pr_debug("RIO_EM: Invalid nextdev pointer (NULL)\n"); + } +rd_err: + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), + &err_status); + pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); + } + + if ((err_status & RIO_PORT_N_ERR_STS_PW_INP_ES) && nextdev) { + pr_debug("RIO_EM: servicing Input Error-Stopped state\n"); + rio_get_input_status(nextdev, + RIO_GET_PORT_NUM(nextdev->swpinfo), NULL); + udelay(50); + + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(pnum), + &err_status); + pr_debug("RIO_EM: SP%d_ERR_STS_CSR=0x%08x\n", pnum, err_status); + } + + return (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES | + RIO_PORT_N_ERR_STS_PW_INP_ES)) ? 1 : 0; +} + +/** + * rio_inb_pwrite_handler - process inbound port-write message + * @pw_msg: pointer to inbound port-write message + * + * Processes an inbound port-write message. Returns 0 if the request + * has been satisfied. + */ +int rio_inb_pwrite_handler(union rio_pw_msg *pw_msg) +{ + struct rio_dev *rdev; + u32 err_status, em_perrdet, em_ltlerrdet; + int rc, portnum; + + rdev = rio_get_comptag((pw_msg->em.comptag & RIO_CTAG_UDEVID), NULL); + if (rdev == NULL) { + /* Device removed or enumeration error */ + pr_debug("RIO: %s No matching device for CTag 0x%08x\n", + __func__, pw_msg->em.comptag); + return -EIO; + } + + pr_debug("RIO: Port-Write message from %s\n", rio_name(rdev)); + +#ifdef DEBUG_PW + { + u32 i; + for (i = 0; i < RIO_PW_MSG_SIZE/sizeof(u32);) { + pr_debug("0x%02x: %08x %08x %08x %08x\n", + i*4, pw_msg->raw[i], pw_msg->raw[i + 1], + pw_msg->raw[i + 2], pw_msg->raw[i + 3]); + i += 4; + } + } +#endif + + /* Call an external service function (if such is registered + * for this device). This may be the service for endpoints that send + * device-specific port-write messages. End-point messages expected + * to be handled completely by EP specific device driver. + * For switches rc==0 signals that no standard processing required. + */ + if (rdev->pwcback != NULL) { + rc = rdev->pwcback(rdev, pw_msg, 0); + if (rc == 0) + return 0; + } + + portnum = pw_msg->em.is_port & 0xFF; + + /* Check if device and route to it are functional: + * Sometimes devices may send PW message(s) just before being + * powered down (or link being lost). + */ + if (rio_chk_dev_access(rdev)) { + pr_debug("RIO: device access failed - get link partner\n"); + /* Scan route to the device and identify failed link. + * This will replace device and port reported in PW message. + * PW message should not be used after this point. 
+ */ + if (rio_chk_dev_route(rdev, &rdev, &portnum)) { + pr_err("RIO: Route trace for %s failed\n", + rio_name(rdev)); + return -EIO; + } + pw_msg = NULL; + } + + /* For End-point devices processing stops here */ + if (!(rdev->pef & RIO_PEF_SWITCH)) + return 0; + + if (rdev->phys_efptr == 0) { + pr_err("RIO_PW: Bad switch initialization for %s\n", + rio_name(rdev)); + return 0; + } + + /* + * Process the port-write notification from switch + */ + if (rdev->rswitch->ops && rdev->rswitch->ops->em_handle) + rdev->rswitch->ops->em_handle(rdev, portnum); + + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), + &err_status); + pr_debug("RIO_PW: SP%d_ERR_STS_CSR=0x%08x\n", portnum, err_status); + + if (err_status & RIO_PORT_N_ERR_STS_PORT_OK) { + + if (!(rdev->rswitch->port_ok & (1 << portnum))) { + rdev->rswitch->port_ok |= (1 << portnum); + rio_set_port_lockout(rdev, portnum, 0); + /* Schedule Insertion Service */ + pr_debug("RIO_PW: Device Insertion on [%s]-P%d\n", + rio_name(rdev), portnum); + } + + /* Clear error-stopped states (if reported). + * Depending on the link partner state, two attempts + * may be needed for successful recovery. + */ + if (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES | + RIO_PORT_N_ERR_STS_PW_INP_ES)) { + if (rio_clr_err_stopped(rdev, portnum, err_status)) + rio_clr_err_stopped(rdev, portnum, 0); + } + } else { /* if (err_status & RIO_PORT_N_ERR_STS_PORT_UNINIT) */ + + if (rdev->rswitch->port_ok & (1 << portnum)) { + rdev->rswitch->port_ok &= ~(1 << portnum); + rio_set_port_lockout(rdev, portnum, 1); + + rio_write_config_32(rdev, + rdev->phys_efptr + + RIO_PORT_N_ACK_STS_CSR(portnum), + RIO_PORT_N_ACK_CLEAR); + + /* Schedule Extraction Service */ + pr_debug("RIO_PW: Device Extraction on [%s]-P%d\n", + rio_name(rdev), portnum); + } + } + + rio_read_config_32(rdev, + rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet); + if (em_perrdet) { + pr_debug("RIO_PW: RIO_EM_P%d_ERR_DETECT=0x%08x\n", + portnum, em_perrdet); + /* Clear EM Port N Error Detect CSR */ + rio_write_config_32(rdev, + rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), 0); + } + + rio_read_config_32(rdev, + rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet); + if (em_ltlerrdet) { + pr_debug("RIO_PW: RIO_EM_LTL_ERR_DETECT=0x%08x\n", + em_ltlerrdet); + /* Clear EM L/T Layer Error Detect CSR */ + rio_write_config_32(rdev, + rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, 0); + } + + /* Clear remaining error bits and Port-Write Pending bit */ + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum), + err_status); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_inb_pwrite_handler); + +/** + * rio_mport_get_efb - get pointer to next extended features block + * @port: Master port to issue transaction + * @local: Indicate a local master port or remote device access + * @destid: Destination ID of the device + * @hopcount: Number of switch hops to the device + * @from: Offset of current Extended Feature block header (if 0 starts + * from ExtFeaturePtr) + */ +u32 +rio_mport_get_efb(struct rio_mport *port, int local, u16 destid, + u8 hopcount, u32 from) +{ + u32 reg_val; + + if (from == 0) { + if (local) + rio_local_read_config_32(port, RIO_ASM_INFO_CAR, + ®_val); + else + rio_mport_read_config_32(port, destid, hopcount, + RIO_ASM_INFO_CAR, ®_val); + return reg_val & RIO_EXT_FTR_PTR_MASK; + } else { + if (local) + rio_local_read_config_32(port, from, ®_val); + else + rio_mport_read_config_32(port, destid, hopcount, + from, ®_val); + return RIO_GET_BLOCK_ID(reg_val); + } 
+}
+EXPORT_SYMBOL_GPL(rio_mport_get_efb);
+
+/**
+ * rio_mport_get_feature - query for devices' extended features
+ * @port: Master port to issue transaction
+ * @local: Indicate a local master port or remote device access
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @ftr: Extended feature code
+ *
+ * Tell if a device supports a given RapidIO capability.
+ * Returns the offset of the requested extended feature
+ * block within the device's RIO configuration space or
+ * 0 in case the device does not support it. Possible
+ * values for @ftr:
+ *
+ * %RIO_EFB_PAR_EP_ID		LP/LVDS EP Devices
+ *
+ * %RIO_EFB_PAR_EP_REC_ID	LP/LVDS EP Recovery Devices
+ *
+ * %RIO_EFB_PAR_EP_FREE_ID	LP/LVDS EP Free Devices
+ *
+ * %RIO_EFB_SER_EP_ID		LP/Serial EP Devices
+ *
+ * %RIO_EFB_SER_EP_REC_ID	LP/Serial EP Recovery Devices
+ *
+ * %RIO_EFB_SER_EP_FREE_ID	LP/Serial EP Free Devices
+ */
+u32
+rio_mport_get_feature(struct rio_mport * port, int local, u16 destid,
+		      u8 hopcount, int ftr)
+{
+	u32 asm_info, ext_ftr_ptr, ftr_header;
+
+	if (local)
+		rio_local_read_config_32(port, RIO_ASM_INFO_CAR, &asm_info);
+	else
+		rio_mport_read_config_32(port, destid, hopcount,
+					 RIO_ASM_INFO_CAR, &asm_info);
+
+	ext_ftr_ptr = asm_info & RIO_EXT_FTR_PTR_MASK;
+
+	while (ext_ftr_ptr) {
+		if (local)
+			rio_local_read_config_32(port, ext_ftr_ptr,
+						 &ftr_header);
+		else
+			rio_mport_read_config_32(port, destid, hopcount,
+						 ext_ftr_ptr, &ftr_header);
+		if (RIO_GET_BLOCK_ID(ftr_header) == ftr)
+			return ext_ftr_ptr;
+		if (!(ext_ftr_ptr = RIO_GET_BLOCK_PTR(ftr_header)))
+			break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rio_mport_get_feature);
+
+/**
+ * rio_get_asm - Begin or continue searching for a RIO device by vid/did/asm_vid/asm_did
+ * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
+ * @did: RIO did to match or %RIO_ANY_ID to match all dids
+ * @asm_vid: RIO asm_vid to match or %RIO_ANY_ID to match all asm_vids
+ * @asm_did: RIO asm_did to match or %RIO_ANY_ID to match all asm_dids
+ * @from: Previous RIO device found in search, or %NULL for new search
+ *
+ * Iterates through the list of known RIO devices. If a RIO device is
+ * found with a matching @vid, @did, @asm_vid, @asm_did, the reference
+ * count to the device is incremented and a pointer to its device
+ * structure is returned. Otherwise, %NULL is returned. A new search
+ * is initiated by passing %NULL to the @from argument. Otherwise, if
+ * @from is not %NULL, searches continue from next device on the global
+ * list. The reference count for @from is always decremented if it is
+ * not %NULL.
+ */
+struct rio_dev *rio_get_asm(u16 vid, u16 did,
+			    u16 asm_vid, u16 asm_did, struct rio_dev *from)
+{
+	struct list_head *n;
+	struct rio_dev *rdev;
+
+	WARN_ON(in_interrupt());
+	spin_lock(&rio_global_list_lock);
+	n = from ? from->global_list.next : rio_devices.next;
+
+	while (n && (n != &rio_devices)) {
+		rdev = rio_dev_g(n);
+		if ((vid == RIO_ANY_ID || rdev->vid == vid) &&
+		    (did == RIO_ANY_ID || rdev->did == did) &&
+		    (asm_vid == RIO_ANY_ID || rdev->asm_vid == asm_vid) &&
+		    (asm_did == RIO_ANY_ID || rdev->asm_did == asm_did))
+			goto exit;
+		n = n->next;
+	}
+	rdev = NULL;
+      exit:
+	rio_dev_put(from);
+	rdev = rio_dev_get(rdev);
+	spin_unlock(&rio_global_list_lock);
+	return rdev;
+}
+
+/**
+ * rio_get_device - Begin or continue searching for a RIO device by vid/did
+ * @vid: RIO vid to match or %RIO_ANY_ID to match all vids
+ * @did: RIO did to match or %RIO_ANY_ID to match all dids
+ * @from: Previous RIO device found in search, or %NULL for new search
+ *
+ * Iterates through the list of known RIO devices. If a RIO device is
+ * found with a matching @vid and @did, the reference count to the
+ * device is incremented and a pointer to its device structure is returned.
+ * Otherwise, %NULL is returned. A new search is initiated by passing %NULL
+ * to the @from argument. Otherwise, if @from is not %NULL, searches
+ * continue from next device on the global list. The reference count for
+ * @from is always decremented if it is not %NULL.
+ */
+struct rio_dev *rio_get_device(u16 vid, u16 did, struct rio_dev *from)
+{
+	return rio_get_asm(vid, did, RIO_ANY_ID, RIO_ANY_ID, from);
+}
+
+/**
+ * rio_std_route_add_entry - Add switch route table entry using standard
+ * registers defined in RIO specification rev.1.3
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ * @route_destid: destID entry in the RT
+ * @route_port: destination port for specified destID
+ */
+static int
+rio_std_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+			u16 table, u16 route_destid, u8 route_port)
+{
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+				(u32)route_destid);
+		rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_PORT_SEL_CSR,
+				(u32)route_port);
+	}
+
+	udelay(10);
+	return 0;
+}
+
+/**
+ * rio_std_route_get_entry - Read switch route table entry (port number)
+ * associated with specified destID using standard registers defined in RIO
+ * specification rev.1.3
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ * @route_destid: destID entry in the RT
+ * @route_port: returned destination port for specified destID
+ */
+static int
+rio_std_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+			u16 table, u16 route_destid, u8 *route_port)
+{
+	u32 result;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_write_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
+		rio_mport_read_config_32(mport, destid, hopcount,
+				RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+		*route_port = (u8)result;
+	}
+
+	return 0;
+}
+
+/**
+ * rio_std_route_clr_table - Clear switch route table using standard registers
+ * defined in RIO specification rev.1.3.
+ * @mport: Master port to issue transaction
+ * @destid: Destination ID of the device
+ * @hopcount: Number of switch hops to the device
+ * @table: routing table ID (global or port-specific)
+ */
+static int
+rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+			u16 table)
+{
+	u32 max_destid = 0xff;
+	u32 i, pef, id_inc = 1, ext_cfg = 0;
+	u32 port_sel = RIO_INVALID_ROUTE;
+
+	if (table == RIO_GLOBAL_TABLE) {
+		rio_mport_read_config_32(mport, destid, hopcount,
+					 RIO_PEF_CAR, &pef);
+
+		if (mport->sys_size) {
+			rio_mport_read_config_32(mport, destid, hopcount,
+						 RIO_SWITCH_RT_LIMIT,
+						 &max_destid);
+			max_destid &= RIO_RT_MAX_DESTID;
+		}
+
+		if (pef & RIO_PEF_EXT_RT) {
+			ext_cfg = 0x80000000;
+			id_inc = 4;
+			port_sel = (RIO_INVALID_ROUTE << 24) |
+				   (RIO_INVALID_ROUTE << 16) |
+				   (RIO_INVALID_ROUTE << 8) |
+				   RIO_INVALID_ROUTE;
+		}
+
+		for (i = 0; i <= max_destid;) {
+			rio_mport_write_config_32(mport, destid, hopcount,
+					RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+					ext_cfg | i);
+			rio_mport_write_config_32(mport, destid, hopcount,
+					RIO_STD_RTE_CONF_PORT_SEL_CSR,
+					port_sel);
+			i += id_inc;
+		}
+	}
+
+	udelay(10);
+	return 0;
+}
+
+/**
+ * rio_lock_device - Acquires host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ * @wait_ms: Max wait time in msec (0 = no timeout)
+ *
+ * Attempts to acquire host device lock for specified device.
+ * Returns 0 if device lock acquired or EINVAL if timeout expires.
+ */
+int rio_lock_device(struct rio_mport *port, u16 destid,
+		    u8 hopcount, int wait_ms)
+{
+	u32 result;
+	int tcnt = 0;
+
+	/* Attempt to acquire device lock */
+	rio_mport_write_config_32(port, destid, hopcount,
+				  RIO_HOST_DID_LOCK_CSR, port->host_deviceid);
+	rio_mport_read_config_32(port, destid, hopcount,
+				 RIO_HOST_DID_LOCK_CSR, &result);
+
+	while (result != port->host_deviceid) {
+		if (wait_ms != 0 && tcnt == wait_ms) {
+			pr_debug("RIO: timeout when locking device %x:%x\n",
+				destid, hopcount);
+			return -EINVAL;
+		}
+
+		/* Delay a bit */
+		mdelay(1);
+		tcnt++;
+		/* Try to acquire device lock again */
+		rio_mport_write_config_32(port, destid,
+			hopcount,
+			RIO_HOST_DID_LOCK_CSR,
+			port->host_deviceid);
+		rio_mport_read_config_32(port, destid,
+			hopcount,
+			RIO_HOST_DID_LOCK_CSR, &result);
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rio_lock_device);
+
+/**
+ * rio_unlock_device - Releases host device lock for specified device
+ * @port: Master port to send transaction
+ * @destid: Destination ID for device/switch
+ * @hopcount: Hopcount to reach switch
+ *
+ * Returns 0 if device lock released or EINVAL if fails.
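+ *
+ * A sketch of the expected pairing with rio_lock_device() (illustrative;
+ * the 1000 msec timeout is the value used by the routing helpers below):
+ *
+ *	rc = rio_lock_device(port, destid, hopcount, 1000);
+ *	if (rc)
+ *		return rc;
+ *	... perform maintenance accesses to the locked device ...
+ *	rio_unlock_device(port, destid, hopcount);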
+ */ +int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount) +{ + u32 result; + + /* Release device lock */ + rio_mport_write_config_32(port, destid, + hopcount, + RIO_HOST_DID_LOCK_CSR, + port->host_deviceid); + rio_mport_read_config_32(port, destid, hopcount, + RIO_HOST_DID_LOCK_CSR, &result); + if ((result & 0xffff) != 0xffff) { + pr_debug("RIO: badness when releasing device lock %x:%x\n", + destid, hopcount); + return -EINVAL; + } + + return 0; +} +EXPORT_SYMBOL_GPL(rio_unlock_device); + +/** + * rio_route_add_entry- Add a route entry to a switch routing table + * @rdev: RIO device + * @table: Routing table ID + * @route_destid: Destination ID to be routed + * @route_port: Port number to be routed + * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) + * + * If available calls the switch specific add_entry() method to add a route + * entry into a switch routing table. Otherwise uses standard RT update method + * as defined by RapidIO specification. A specific routing table can be selected + * using the @table argument if a switch has per port routing tables or + * the standard (or global) table may be used by passing + * %RIO_GLOBAL_TABLE in @table. + * + * Returns %0 on success or %-EINVAL on failure. + */ +int rio_route_add_entry(struct rio_dev *rdev, + u16 table, u16 route_destid, u8 route_port, int lock) +{ + int rc = -EINVAL; + struct rio_switch_ops *ops = rdev->rswitch->ops; + + if (lock) { + rc = rio_lock_device(rdev->net->hport, rdev->destid, + rdev->hopcount, 1000); + if (rc) + return rc; + } + + spin_lock(&rdev->rswitch->lock); + + if (ops == NULL || ops->add_entry == NULL) { + rc = rio_std_route_add_entry(rdev->net->hport, rdev->destid, + rdev->hopcount, table, + route_destid, route_port); + } else if (try_module_get(ops->owner)) { + rc = ops->add_entry(rdev->net->hport, rdev->destid, + rdev->hopcount, table, route_destid, + route_port); + module_put(ops->owner); + } + + spin_unlock(&rdev->rswitch->lock); + + if (lock) + rio_unlock_device(rdev->net->hport, rdev->destid, + rdev->hopcount); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_route_add_entry); + +/** + * rio_route_get_entry- Read an entry from a switch routing table + * @rdev: RIO device + * @table: Routing table ID + * @route_destid: Destination ID to be routed + * @route_port: Pointer to read port number into + * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) + * + * If available calls the switch specific get_entry() method to fetch a route + * entry from a switch routing table. Otherwise uses standard RT read method + * as defined by RapidIO specification. A specific routing table can be selected + * using the @table argument if a switch has per port routing tables or + * the standard (or global) table may be used by passing + * %RIO_GLOBAL_TABLE in @table. + * + * Returns %0 on success or %-EINVAL on failure. 
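+ *
+ * For example, the port currently routing a given destination ID through
+ * a switch could be read from the global table like this (illustrative):
+ *
+ *	u8 port_num;
+ *
+ *	rc = rio_route_get_entry(rdev, RIO_GLOBAL_TABLE, destid,
+ *				 &port_num, 1);
+ *
+ * Passing @lock as 1 makes the helper acquire and release the host device
+ * lock around the read, as shown in the function body below.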
+ */ +int rio_route_get_entry(struct rio_dev *rdev, u16 table, + u16 route_destid, u8 *route_port, int lock) +{ + int rc = -EINVAL; + struct rio_switch_ops *ops = rdev->rswitch->ops; + + if (lock) { + rc = rio_lock_device(rdev->net->hport, rdev->destid, + rdev->hopcount, 1000); + if (rc) + return rc; + } + + spin_lock(&rdev->rswitch->lock); + + if (ops == NULL || ops->get_entry == NULL) { + rc = rio_std_route_get_entry(rdev->net->hport, rdev->destid, + rdev->hopcount, table, + route_destid, route_port); + } else if (try_module_get(ops->owner)) { + rc = ops->get_entry(rdev->net->hport, rdev->destid, + rdev->hopcount, table, route_destid, + route_port); + module_put(ops->owner); + } + + spin_unlock(&rdev->rswitch->lock); + + if (lock) + rio_unlock_device(rdev->net->hport, rdev->destid, + rdev->hopcount); + return rc; +} +EXPORT_SYMBOL_GPL(rio_route_get_entry); + +/** + * rio_route_clr_table - Clear a switch routing table + * @rdev: RIO device + * @table: Routing table ID + * @lock: apply a hardware lock on switch device flag (1=lock, 0=no_lock) + * + * If available calls the switch specific clr_table() method to clear a switch + * routing table. Otherwise uses standard RT write method as defined by RapidIO + * specification. A specific routing table can be selected using the @table + * argument if a switch has per port routing tables or the standard (or global) + * table may be used by passing %RIO_GLOBAL_TABLE in @table. + * + * Returns %0 on success or %-EINVAL on failure. + */ +int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock) +{ + int rc = -EINVAL; + struct rio_switch_ops *ops = rdev->rswitch->ops; + + if (lock) { + rc = rio_lock_device(rdev->net->hport, rdev->destid, + rdev->hopcount, 1000); + if (rc) + return rc; + } + + spin_lock(&rdev->rswitch->lock); + + if (ops == NULL || ops->clr_table == NULL) { + rc = rio_std_route_clr_table(rdev->net->hport, rdev->destid, + rdev->hopcount, table); + } else if (try_module_get(ops->owner)) { + rc = ops->clr_table(rdev->net->hport, rdev->destid, + rdev->hopcount, table); + + module_put(ops->owner); + } + + spin_unlock(&rdev->rswitch->lock); + + if (lock) + rio_unlock_device(rdev->net->hport, rdev->destid, + rdev->hopcount); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_route_clr_table); + +#ifdef CONFIG_RAPIDIO_DMA_ENGINE + +static bool rio_chan_filter(struct dma_chan *chan, void *arg) +{ + struct rio_mport *mport = arg; + + /* Check that DMA device belongs to the right MPORT */ + return mport == container_of(chan->device, struct rio_mport, dma); +} + +/** + * rio_request_mport_dma - request RapidIO capable DMA channel associated + * with specified local RapidIO mport device. + * @mport: RIO mport to perform DMA data transfers + * + * Returns pointer to allocated DMA channel or NULL if failed. + */ +struct dma_chan *rio_request_mport_dma(struct rio_mport *mport) +{ + dma_cap_mask_t mask; + + dma_cap_zero(mask); + dma_cap_set(DMA_SLAVE, mask); + return dma_request_channel(mask, rio_chan_filter, mport); +} +EXPORT_SYMBOL_GPL(rio_request_mport_dma); + +/** + * rio_request_dma - request RapidIO capable DMA channel that supports + * specified target RapidIO device. + * @rdev: RIO device associated with DMA transfer + * + * Returns pointer to allocated DMA channel or NULL if failed. 
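+ *
+ * An illustrative transfer setup using the wrappers below (hypothetical
+ * driver code; sgl, sg_len and rio_addr are assumed to describe an
+ * already mapped scatterlist and a valid target address):
+ *
+ *	struct rio_dma_data data;
+ *	struct dma_chan *dchan = rio_request_dma(rdev);
+ *	struct dma_async_tx_descriptor *txd;
+ *
+ *	data.sg = sgl;
+ *	data.sg_len = sg_len;
+ *	data.rio_addr = rio_addr;
+ *	data.rio_addr_u = 0;
+ *	data.wr_type = RDW_DEFAULT;
+ *	txd = rio_dma_prep_slave_sg(rdev, dchan, &data, DMA_MEM_TO_DEV, 0);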
+ */ +struct dma_chan *rio_request_dma(struct rio_dev *rdev) +{ + return rio_request_mport_dma(rdev->net->hport); +} +EXPORT_SYMBOL_GPL(rio_request_dma); + +/** + * rio_release_dma - release specified DMA channel + * @dchan: DMA channel to release + */ +void rio_release_dma(struct dma_chan *dchan) +{ + dma_release_channel(dchan); +} +EXPORT_SYMBOL_GPL(rio_release_dma); + +/** + * rio_dma_prep_xfer - RapidIO specific wrapper + * for device_prep_slave_sg callback defined by DMAENGINE. + * @dchan: DMA channel to configure + * @destid: target RapidIO device destination ID + * @data: RIO specific data descriptor + * @direction: DMA data transfer direction (TO or FROM the device) + * @flags: dmaengine defined flags + * + * Initializes RapidIO capable DMA channel for the specified data transfer. + * Uses DMA channel private extension to pass information related to remote + * target RIO device. + * Returns pointer to DMA transaction descriptor or NULL if failed. + */ +struct dma_async_tx_descriptor *rio_dma_prep_xfer(struct dma_chan *dchan, + u16 destid, struct rio_dma_data *data, + enum dma_transfer_direction direction, unsigned long flags) +{ + struct rio_dma_ext rio_ext; + + if (dchan->device->device_prep_slave_sg == NULL) { + pr_err("%s: prep_rio_sg == NULL\n", __func__); + return NULL; + } + + rio_ext.destid = destid; + rio_ext.rio_addr_u = data->rio_addr_u; + rio_ext.rio_addr = data->rio_addr; + rio_ext.wr_type = data->wr_type; + + return dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len, + direction, flags, &rio_ext); +} +EXPORT_SYMBOL_GPL(rio_dma_prep_xfer); + +/** + * rio_dma_prep_slave_sg - RapidIO specific wrapper + * for device_prep_slave_sg callback defined by DMAENGINE. + * @rdev: RIO device control structure + * @dchan: DMA channel to configure + * @data: RIO specific data descriptor + * @direction: DMA data transfer direction (TO or FROM the device) + * @flags: dmaengine defined flags + * + * Initializes RapidIO capable DMA channel for the specified data transfer. + * Uses DMA channel private extension to pass information related to remote + * target RIO device. + * Returns pointer to DMA transaction descriptor or NULL if failed. + */ +struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev, + struct dma_chan *dchan, struct rio_dma_data *data, + enum dma_transfer_direction direction, unsigned long flags) +{ + return rio_dma_prep_xfer(dchan, rdev->destid, data, direction, flags); +} +EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg); + +#endif /* CONFIG_RAPIDIO_DMA_ENGINE */ + +/** + * rio_find_mport - find RIO mport by its ID + * @mport_id: number (ID) of mport device + * + * Given a RIO mport number, the desired mport is located + * in the global list of mports. If the mport is found, a pointer to its + * data structure is returned. If no mport is found, %NULL is returned. 
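+ *
+ * For example, mport 0 (registered as "rapidio0", see
+ * rio_register_mport()) could be looked up with (illustrative):
+ *
+ *	struct rio_mport *mport = rio_find_mport(0);
+ *
+ * Note that no reference is taken on the returned structure; callers
+ * rely on mport devices staying on the rio_mports list.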
+ */ +struct rio_mport *rio_find_mport(int mport_id) +{ + struct rio_mport *port; + + mutex_lock(&rio_mport_list_lock); + list_for_each_entry(port, &rio_mports, node) { + if (port->id == mport_id) + goto found; + } + port = NULL; +found: + mutex_unlock(&rio_mport_list_lock); + + return port; +} + +/** + * rio_register_scan - enumeration/discovery method registration interface + * @mport_id: mport device ID for which fabric scan routine has to be set + * (RIO_MPORT_ANY = set for all available mports) + * @scan_ops: enumeration/discovery operations structure + * + * Registers enumeration/discovery operations with RapidIO subsystem and + * attaches it to the specified mport device (or all available mports + * if RIO_MPORT_ANY is specified). + * + * Returns error if the mport already has an enumerator attached to it. + * In case of RIO_MPORT_ANY skips mports with valid scan routines (no error). + */ +int rio_register_scan(int mport_id, struct rio_scan *scan_ops) +{ + struct rio_mport *port; + struct rio_scan_node *scan; + int rc = 0; + + pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); + + if ((mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) || + !scan_ops) + return -EINVAL; + + mutex_lock(&rio_mport_list_lock); + + /* + * Check if there is another enumerator already registered for + * the same mport ID (including RIO_MPORT_ANY). Multiple enumerators + * for the same mport ID are not supported. + */ + list_for_each_entry(scan, &rio_scans, node) { + if (scan->mport_id == mport_id) { + rc = -EBUSY; + goto err_out; + } + } + + /* + * Allocate and initialize new scan registration node. + */ + scan = kzalloc(sizeof(*scan), GFP_KERNEL); + if (!scan) { + rc = -ENOMEM; + goto err_out; + } + + scan->mport_id = mport_id; + scan->ops = scan_ops; + + /* + * Traverse the list of registered mports to attach this new scan. + * + * The new scan with matching mport ID overrides any previously attached + * scan assuming that old scan (if any) is the default one (based on the + * enumerator registration check above). + * If the new scan is the global one, it will be attached only to mports + * that do not have their own individual operations already attached. + */ + list_for_each_entry(port, &rio_mports, node) { + if (port->id == mport_id) { + port->nscan = scan_ops; + break; + } else if (mport_id == RIO_MPORT_ANY && !port->nscan) + port->nscan = scan_ops; + } + + list_add_tail(&scan->node, &rio_scans); + +err_out: + mutex_unlock(&rio_mport_list_lock); + + return rc; +} +EXPORT_SYMBOL_GPL(rio_register_scan); + +/** + * rio_unregister_scan - removes enumeration/discovery method from mport + * @mport_id: mport device ID for which fabric scan routine has to be + * unregistered (RIO_MPORT_ANY = apply to all mports that use + * the specified scan_ops) + * @scan_ops: enumeration/discovery operations structure + * + * Removes enumeration or discovery method assigned to the specified mport + * device. If RIO_MPORT_ANY is specified, removes the specified operations from + * all mports that have them attached. 
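+ *
+ * A sketch of the register/unregister pairing (mirroring the use of
+ * rio_register_scan() by rio_basic_attach() in rio-scan.c; illustrative
+ * only, my_scan_ops is a hypothetical struct rio_scan with .owner,
+ * .enumerate and .discover filled in):
+ *
+ *	rc = rio_register_scan(RIO_MPORT_ANY, &my_scan_ops);
+ *	...
+ *	rio_unregister_scan(RIO_MPORT_ANY, &my_scan_ops);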
+ */ +int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops) +{ + struct rio_mport *port; + struct rio_scan_node *scan; + + pr_debug("RIO: %s for mport_id=%d\n", __func__, mport_id); + + if (mport_id != RIO_MPORT_ANY && mport_id >= RIO_MAX_MPORTS) + return -EINVAL; + + mutex_lock(&rio_mport_list_lock); + + list_for_each_entry(port, &rio_mports, node) + if (port->id == mport_id || + (mport_id == RIO_MPORT_ANY && port->nscan == scan_ops)) + port->nscan = NULL; + + list_for_each_entry(scan, &rio_scans, node) { + if (scan->mport_id == mport_id) { + list_del(&scan->node); + kfree(scan); + break; + } + } + + mutex_unlock(&rio_mport_list_lock); + + return 0; +} +EXPORT_SYMBOL_GPL(rio_unregister_scan); + +/** + * rio_mport_scan - execute enumeration/discovery on the specified mport + * @mport_id: number (ID) of mport device + */ +int rio_mport_scan(int mport_id) +{ + struct rio_mport *port = NULL; + int rc; + + mutex_lock(&rio_mport_list_lock); + list_for_each_entry(port, &rio_mports, node) { + if (port->id == mport_id) + goto found; + } + mutex_unlock(&rio_mport_list_lock); + return -ENODEV; +found: + if (!port->nscan) { + mutex_unlock(&rio_mport_list_lock); + return -EINVAL; + } + + if (!try_module_get(port->nscan->owner)) { + mutex_unlock(&rio_mport_list_lock); + return -ENODEV; + } + + mutex_unlock(&rio_mport_list_lock); + + if (port->host_deviceid >= 0) + rc = port->nscan->enumerate(port, 0); + else + rc = port->nscan->discover(port, RIO_SCAN_ENUM_NO_WAIT); + + module_put(port->nscan->owner); + return rc; +} + +static void rio_fixup_device(struct rio_dev *dev) +{ +} + +static int rio_init(void) +{ + struct rio_dev *dev = NULL; + + while ((dev = rio_get_device(RIO_ANY_ID, RIO_ANY_ID, dev)) != NULL) { + rio_fixup_device(dev); + } + return 0; +} + +static struct workqueue_struct *rio_wq; + +struct rio_disc_work { + struct work_struct work; + struct rio_mport *mport; +}; + +static void disc_work_handler(struct work_struct *_work) +{ + struct rio_disc_work *work; + + work = container_of(_work, struct rio_disc_work, work); + pr_debug("RIO: discovery work for mport %d %s\n", + work->mport->id, work->mport->name); + if (try_module_get(work->mport->nscan->owner)) { + work->mport->nscan->discover(work->mport, 0); + module_put(work->mport->nscan->owner); + } +} + +int rio_init_mports(void) +{ + struct rio_mport *port; + struct rio_disc_work *work; + int n = 0; + + if (!next_portid) + return -ENODEV; + + /* + * First, run enumerations and check if we need to perform discovery + * on any of the registered mports. + */ + mutex_lock(&rio_mport_list_lock); + list_for_each_entry(port, &rio_mports, node) { + if (port->host_deviceid >= 0) { + if (port->nscan && try_module_get(port->nscan->owner)) { + port->nscan->enumerate(port, 0); + module_put(port->nscan->owner); + } + } else + n++; + } + mutex_unlock(&rio_mport_list_lock); + + if (!n) + goto no_disc; + + /* + * If we have mports that require discovery schedule a discovery work + * for each of them. If the code below fails to allocate needed + * resources, exit without error to keep results of enumeration + * process (if any). + * TODO: Implement restart of discovery process for all or + * individual discovering mports. 
+ */ + rio_wq = alloc_workqueue("riodisc", 0, 0); + if (!rio_wq) { + pr_err("RIO: unable allocate rio_wq\n"); + goto no_disc; + } + + work = kcalloc(n, sizeof *work, GFP_KERNEL); + if (!work) { + pr_err("RIO: no memory for work struct\n"); + destroy_workqueue(rio_wq); + goto no_disc; + } + + n = 0; + mutex_lock(&rio_mport_list_lock); + list_for_each_entry(port, &rio_mports, node) { + if (port->host_deviceid < 0 && port->nscan) { + work[n].mport = port; + INIT_WORK(&work[n].work, disc_work_handler); + queue_work(rio_wq, &work[n].work); + n++; + } + } + + flush_workqueue(rio_wq); + mutex_unlock(&rio_mport_list_lock); + pr_debug("RIO: destroy discovery workqueue\n"); + destroy_workqueue(rio_wq); + kfree(work); + +no_disc: + rio_init(); + + return 0; +} + +static int rio_get_hdid(int index) +{ + if (ids_num == 0 || ids_num <= index || index >= RIO_MAX_MPORTS) + return -1; + + return hdid[index]; +} + +int rio_register_mport(struct rio_mport *port) +{ + struct rio_scan_node *scan = NULL; + int res = 0; + + if (next_portid >= RIO_MAX_MPORTS) { + pr_err("RIO: reached specified max number of mports\n"); + return 1; + } + + port->id = next_portid++; + port->host_deviceid = rio_get_hdid(port->id); + port->nscan = NULL; + + dev_set_name(&port->dev, "rapidio%d", port->id); + port->dev.class = &rio_mport_class; + + res = device_register(&port->dev); + if (res) + dev_err(&port->dev, "RIO: mport%d registration failed ERR=%d\n", + port->id, res); + else + dev_dbg(&port->dev, "RIO: mport%d registered\n", port->id); + + mutex_lock(&rio_mport_list_lock); + list_add_tail(&port->node, &rio_mports); + + /* + * Check if there are any registered enumeration/discovery operations + * that have to be attached to the added mport. + */ + list_for_each_entry(scan, &rio_scans, node) { + if (port->id == scan->mport_id || + scan->mport_id == RIO_MPORT_ANY) { + port->nscan = scan->ops; + if (port->id == scan->mport_id) + break; + } + } + mutex_unlock(&rio_mport_list_lock); + + pr_debug("RIO: %s %s id=%d\n", __func__, port->name, port->id); + return 0; +} +EXPORT_SYMBOL_GPL(rio_register_mport); + +EXPORT_SYMBOL_GPL(rio_local_get_device_id); +EXPORT_SYMBOL_GPL(rio_get_device); +EXPORT_SYMBOL_GPL(rio_get_asm); +EXPORT_SYMBOL_GPL(rio_request_inb_dbell); +EXPORT_SYMBOL_GPL(rio_release_inb_dbell); +EXPORT_SYMBOL_GPL(rio_request_outb_dbell); +EXPORT_SYMBOL_GPL(rio_release_outb_dbell); +EXPORT_SYMBOL_GPL(rio_request_inb_mbox); +EXPORT_SYMBOL_GPL(rio_release_inb_mbox); +EXPORT_SYMBOL_GPL(rio_request_outb_mbox); +EXPORT_SYMBOL_GPL(rio_release_outb_mbox); +EXPORT_SYMBOL_GPL(rio_init_mports); diff --git a/drivers/rapidio/rio.h b/drivers/rapidio/rio.h new file mode 100644 index 000000000..2d0550e08 --- /dev/null +++ b/drivers/rapidio/rio.h @@ -0,0 +1,56 @@ +/* + * RapidIO interconnect services + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */
+
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/rio.h>
+
+#define RIO_MAX_CHK_RETRY 3
+#define RIO_MPORT_ANY (-1)
+
+/* Functions internal to the RIO core code */
+
+extern u32 rio_mport_get_feature(struct rio_mport *mport, int local, u16 destid,
+ u8 hopcount, int ftr);
+extern u32 rio_mport_get_physefb(struct rio_mport *port, int local,
+ u16 destid, u8 hopcount);
+extern u32 rio_mport_get_efb(struct rio_mport *port, int local, u16 destid,
+ u8 hopcount, u32 from);
+extern int rio_mport_chk_dev_access(struct rio_mport *mport, u16 destid,
+ u8 hopcount);
+extern int rio_create_sysfs_dev_files(struct rio_dev *rdev);
+extern int rio_lock_device(struct rio_mport *port, u16 destid,
+ u8 hopcount, int wait_ms);
+extern int rio_unlock_device(struct rio_mport *port, u16 destid, u8 hopcount);
+extern int rio_route_add_entry(struct rio_dev *rdev,
+ u16 table, u16 route_destid, u8 route_port, int lock);
+extern int rio_route_get_entry(struct rio_dev *rdev, u16 table,
+ u16 route_destid, u8 *route_port, int lock);
+extern int rio_route_clr_table(struct rio_dev *rdev, u16 table, int lock);
+extern int rio_set_port_lockout(struct rio_dev *rdev, u32 pnum, int lock);
+extern struct rio_dev *rio_get_comptag(u32 comp_tag, struct rio_dev *from);
+extern int rio_add_device(struct rio_dev *rdev);
+extern int rio_enable_rx_tx_port(struct rio_mport *port, int local, u16 destid,
+ u8 hopcount, u8 port_num);
+extern int rio_register_scan(int mport_id, struct rio_scan *scan_ops);
+extern int rio_unregister_scan(int mport_id, struct rio_scan *scan_ops);
+extern void rio_attach_device(struct rio_dev *rdev);
+extern struct rio_mport *rio_find_mport(int mport_id);
+extern int rio_mport_scan(int mport_id);
+
+/* Structures internal to the RIO core code */
+extern const struct attribute_group *rio_dev_groups[];
+extern const struct attribute_group *rio_bus_groups[];
+extern const struct attribute_group *rio_mport_groups[];
+
+#define RIO_GET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x00ff0000) >> 16))
+#define RIO_SET_DID(size, x) (size ? (x & 0xffff) : ((x & 0x000000ff) << 16))
diff --git a/drivers/rapidio/switches/Kconfig b/drivers/rapidio/switches/Kconfig
new file mode 100644
index 000000000..345841562
--- /dev/null
+++ b/drivers/rapidio/switches/Kconfig
@@ -0,0 +1,24 @@
+#
+# RapidIO switches configuration
+#
+config RAPIDIO_TSI57X
+ tristate "IDT Tsi57x SRIO switches support"
+ ---help---
+ Includes support for IDT Tsi57x family of serial RapidIO switches.
+
+config RAPIDIO_CPS_XX
+ tristate "IDT CPS-xx SRIO switches support"
+ ---help---
+ Includes support for IDT CPS-16/12/10/8 serial RapidIO switches.
+
+config RAPIDIO_TSI568
+ tristate "Tsi568 SRIO switch support"
+ default n
+ ---help---
+ Includes support for IDT Tsi568 serial RapidIO switch.
+
+config RAPIDIO_CPS_GEN2
+ tristate "IDT CPS Gen.2 SRIO switch support"
+ default n
+ ---help---
+ Includes support for IDT CPS Gen.2 serial RapidIO switches.
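
The rio.h declarations above define the core's internal contract for loadable enumerators: a module fills a struct rio_scan (owner, enumerate, discover) and hands it to rio_register_scan(). A minimal sketch of such a module follows; the my_* names are hypothetical and not part of this patch:

#include <linux/module.h>
#include <linux/rio.h>
#include "rio.h"

static int my_enumerate(struct rio_mport *mport, u32 flags)
{
	/* walk the fabric from this mport, assign destIDs, set up routes */
	return 0;
}

static int my_discover(struct rio_mport *mport, u32 flags)
{
	/* wait for the enumerating host to finish, then read back config */
	return 0;
}

static struct rio_scan my_scan_ops = {
	.owner = THIS_MODULE,
	.enumerate = my_enumerate,
	.discover = my_discover,
};

static int __init my_scan_init(void)
{
	/* attach to all mports without a dedicated enumerator;
	 * fails with -EBUSY if a RIO_MPORT_ANY enumerator exists */
	return rio_register_scan(RIO_MPORT_ANY, &my_scan_ops);
}

static void __exit my_scan_exit(void)
{
	rio_unregister_scan(RIO_MPORT_ANY, &my_scan_ops);
}

module_init(my_scan_init);
module_exit(my_scan_exit);
MODULE_LICENSE("GPL");

Registering with RIO_MPORT_ANY attaches the operations only to mports that do not already have an individual enumerator, matching the fallback behavior implemented in rio_register_scan() above.
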
diff --git a/drivers/rapidio/switches/Makefile b/drivers/rapidio/switches/Makefile
new file mode 100644
index 000000000..051cc6b38
--- /dev/null
+++ b/drivers/rapidio/switches/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for RIO switches
+#
+
+obj-$(CONFIG_RAPIDIO_TSI57X) += tsi57x.o
+obj-$(CONFIG_RAPIDIO_CPS_XX) += idtcps.o
+obj-$(CONFIG_RAPIDIO_TSI568) += tsi568.o
+obj-$(CONFIG_RAPIDIO_CPS_GEN2) += idt_gen2.o
diff --git a/drivers/rapidio/switches/idt_gen2.c b/drivers/rapidio/switches/idt_gen2.c
new file mode 100644
index 000000000..9f7fe2158
--- /dev/null
+++ b/drivers/rapidio/switches/idt_gen2.c
@@ -0,0 +1,495 @@
+/*
+ * IDT CPS Gen.2 Serial RapidIO switch family support
+ *
+ * Copyright 2010 Integrated Device Technology, Inc.
+ * Alexandre Bounine
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/stat.h>
+#include <linux/module.h>
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+
+#include <asm/page.h>
+#include "../rio.h"
+
+#define LOCAL_RTE_CONF_DESTID_SEL 0x010070
+#define LOCAL_RTE_CONF_DESTID_SEL_PSEL 0x0000001f
+
+#define IDT_LT_ERR_REPORT_EN 0x03100c
+
+#define IDT_PORT_ERR_REPORT_EN(n) (0x031044 + (n)*0x40)
+#define IDT_PORT_ERR_REPORT_EN_BC 0x03ff04
+
+#define IDT_PORT_ISERR_REPORT_EN(n) (0x03104C + (n)*0x40)
+#define IDT_PORT_ISERR_REPORT_EN_BC 0x03ff0c
+#define IDT_PORT_INIT_TX_ACQUIRED 0x00000020
+
+#define IDT_LANE_ERR_REPORT_EN(n) (0x038010 + (n)*0x100)
+#define IDT_LANE_ERR_REPORT_EN_BC 0x03ff10
+
+#define IDT_DEV_CTRL_1 0xf2000c
+#define IDT_DEV_CTRL_1_GENPW 0x02000000
+#define IDT_DEV_CTRL_1_PRSTBEH 0x00000001
+
+#define IDT_CFGBLK_ERR_CAPTURE_EN 0x020008
+#define IDT_CFGBLK_ERR_REPORT 0xf20014
+#define IDT_CFGBLK_ERR_REPORT_GENPW 0x00000002
+
+#define IDT_AUX_PORT_ERR_CAP_EN 0x020000
+#define IDT_AUX_ERR_REPORT_EN 0xf20018
+#define IDT_AUX_PORT_ERR_LOG_I2C 0x00000002
+#define IDT_AUX_PORT_ERR_LOG_JTAG 0x00000001
+
+#define IDT_ISLTL_ADDRESS_CAP 0x021014
+
+#define IDT_RIO_DOMAIN 0xf20020
+#define IDT_RIO_DOMAIN_MASK 0x000000ff
+
+#define IDT_PW_INFO_CSR 0xf20024
+
+#define IDT_SOFT_RESET 0xf20040
+#define IDT_SOFT_RESET_REQ 0x00030097
+
+#define IDT_I2C_MCTRL 0xf20050
+#define IDT_I2C_MCTRL_GENPW 0x04000000
+
+#define IDT_JTAG_CTRL 0xf2005c
+#define IDT_JTAG_CTRL_GENPW 0x00000002
+
+#define IDT_LANE_CTRL(n) (0xff8000 + (n)*0x100)
+#define IDT_LANE_CTRL_BC 0xffff00
+#define IDT_LANE_CTRL_GENPW 0x00200000
+#define IDT_LANE_DFE_1_BC 0xffff18
+#define IDT_LANE_DFE_2_BC 0xffff1c
+
+#define IDT_PORT_OPS(n) (0xf40004 + (n)*0x100)
+#define IDT_PORT_OPS_GENPW 0x08000000
+#define IDT_PORT_OPS_PL_ELOG 0x00000040
+#define IDT_PORT_OPS_LL_ELOG 0x00000020
+#define IDT_PORT_OPS_LT_ELOG 0x00000010
+#define IDT_PORT_OPS_BC 0xf4ff04
+
+#define IDT_PORT_ISERR_DET(n) (0xf40008 + (n)*0x100)
+
+#define IDT_ERR_CAP 0xfd0000
+#define IDT_ERR_CAP_LOG_OVERWR 0x00000004
+
+#define IDT_ERR_RD 0xfd0004
+
+#define IDT_DEFAULT_ROUTE 0xde
+#define IDT_NO_ROUTE 0xdf
+
+static int
+idtg2_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ /*
+ * Select routing table to update
+ */
+ if (table == RIO_GLOBAL_TABLE)
+ table = 0;
+ else
+ table++;
+
+ if (route_port == RIO_INVALID_ROUTE)
+ route_port = IDT_DEFAULT_ROUTE;
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ LOCAL_RTE_CONF_DESTID_SEL, table);
+
+ /*
+ * Program destination port for the specified
destID
+ */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+ (u32)route_destid);
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ (u32)route_port);
+ udelay(10);
+
+ return 0;
+}
+
+static int
+idtg2_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ u32 result;
+
+ /*
+ * Select routing table to read
+ */
+ if (table == RIO_GLOBAL_TABLE)
+ table = 0;
+ else
+ table++;
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ LOCAL_RTE_CONF_DESTID_SEL, table);
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR,
+ route_destid);
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+ if (IDT_DEFAULT_ROUTE == (u8)result || IDT_NO_ROUTE == (u8)result)
+ *route_port = RIO_INVALID_ROUTE;
+ else
+ *route_port = (u8)result;
+
+ return 0;
+}
+
+static int
+idtg2_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 i;
+
+ /*
+ * Select routing table to read
+ */
+ if (table == RIO_GLOBAL_TABLE)
+ table = 0;
+ else
+ table++;
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ LOCAL_RTE_CONF_DESTID_SEL, table);
+
+ for (i = RIO_STD_RTE_CONF_EXTCFGEN;
+ i <= (RIO_STD_RTE_CONF_EXTCFGEN | 0xff);) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ (IDT_DEFAULT_ROUTE << 24) | (IDT_DEFAULT_ROUTE << 16) |
+ (IDT_DEFAULT_ROUTE << 8) | IDT_DEFAULT_ROUTE);
+ i += 4;
+ }
+
+ return 0;
+}
+
+
+static int
+idtg2_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 sw_domain)
+{
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDT_RIO_DOMAIN, (u32)sw_domain);
+ return 0;
+}
+
+static int
+idtg2_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 *sw_domain)
+{
+ u32 regval;
+
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDT_RIO_DOMAIN, &regval);
+
+ *sw_domain = (u8)(regval & 0xff);
+
+ return 0;
+}
+
+static int
+idtg2_em_init(struct rio_dev *rdev)
+{
+ u32 regval;
+ int i, tmp;
+
+ /*
+ * This routine performs device-specific initialization only.
+ * All standard EM configuration should be performed at upper level.
+ */
+
+ pr_debug("RIO: %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
+
+ /* Set Port-Write info CSR: PRIO=3 and CRF=1 */
+ rio_write_config_32(rdev, IDT_PW_INFO_CSR, 0x0000e000);
+
+ /*
+ * Configure LT LAYER error reporting.
+ */
+
+ /* Enable standard (RIO.p8) error reporting */
+ rio_write_config_32(rdev, IDT_LT_ERR_REPORT_EN,
+ REM_LTL_ERR_ILLTRAN | REM_LTL_ERR_UNSOLR |
+ REM_LTL_ERR_UNSUPTR);
+
+ /* Use Port-Writes for LT layer error reporting.
+ * Enable per-port reset
+ */
+ rio_read_config_32(rdev, IDT_DEV_CTRL_1, &regval);
+ rio_write_config_32(rdev, IDT_DEV_CTRL_1,
+ regval | IDT_DEV_CTRL_1_GENPW | IDT_DEV_CTRL_1_PRSTBEH);
+
+ /*
+ * Configure PORT error reporting.
+ */
+
+ /* Report all RIO.p8 errors supported by device */
+ rio_write_config_32(rdev, IDT_PORT_ERR_REPORT_EN_BC, 0x807e8037);
+
+ /* Configure reporting of implementation specific errors/events */
+ rio_write_config_32(rdev, IDT_PORT_ISERR_REPORT_EN_BC,
+ IDT_PORT_INIT_TX_ACQUIRED);
+
+ /* Use Port-Writes for port error reporting and enable error logging */
+ tmp = RIO_GET_TOTAL_PORTS(rdev->swpinfo);
+ for (i = 0; i < tmp; i++) {
+ rio_read_config_32(rdev, IDT_PORT_OPS(i), &regval);
+ rio_write_config_32(rdev,
+ IDT_PORT_OPS(i), regval | IDT_PORT_OPS_GENPW |
+ IDT_PORT_OPS_PL_ELOG |
+ IDT_PORT_OPS_LL_ELOG |
+ IDT_PORT_OPS_LT_ELOG);
+ }
+ /* Overwrite error log if full */
+ rio_write_config_32(rdev, IDT_ERR_CAP, IDT_ERR_CAP_LOG_OVERWR);
+
+ /*
+ * Configure LANE error reporting.
+ */
+
+ /* Disable line error reporting */
+ rio_write_config_32(rdev, IDT_LANE_ERR_REPORT_EN_BC, 0);
+
+ /* Use Port-Writes for lane error reporting (when enabled)
+ * (do per-lane update because lanes may have different configuration)
+ */
+ tmp = (rdev->did == RIO_DID_IDTCPS1848) ? 48 : 16;
+ for (i = 0; i < tmp; i++) {
+ rio_read_config_32(rdev, IDT_LANE_CTRL(i), &regval);
+ rio_write_config_32(rdev, IDT_LANE_CTRL(i),
+ regval | IDT_LANE_CTRL_GENPW);
+ }
+
+ /*
+ * Configure AUX error reporting.
+ */
+
+ /* Disable JTAG and I2C Error capture */
+ rio_write_config_32(rdev, IDT_AUX_PORT_ERR_CAP_EN, 0);
+
+ /* Disable JTAG and I2C Error reporting/logging */
+ rio_write_config_32(rdev, IDT_AUX_ERR_REPORT_EN, 0);
+
+ /* Disable Port-Write notification from JTAG */
+ rio_write_config_32(rdev, IDT_JTAG_CTRL, 0);
+
+ /* Disable Port-Write notification from I2C */
+ rio_read_config_32(rdev, IDT_I2C_MCTRL, &regval);
+ rio_write_config_32(rdev, IDT_I2C_MCTRL, regval & ~IDT_I2C_MCTRL_GENPW);
+
+ /*
+ * Configure CFG_BLK error reporting.
+ */ + + /* Disable Configuration Block error capture */ + rio_write_config_32(rdev, IDT_CFGBLK_ERR_CAPTURE_EN, 0); + + /* Disable Port-Writes for Configuration Block error reporting */ + rio_read_config_32(rdev, IDT_CFGBLK_ERR_REPORT, ®val); + rio_write_config_32(rdev, IDT_CFGBLK_ERR_REPORT, + regval & ~IDT_CFGBLK_ERR_REPORT_GENPW); + + /* set TVAL = ~50us */ + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8); + + return 0; +} + +static int +idtg2_em_handler(struct rio_dev *rdev, u8 portnum) +{ + u32 regval, em_perrdet, em_ltlerrdet; + + rio_read_config_32(rdev, + rdev->em_efptr + RIO_EM_LTL_ERR_DETECT, &em_ltlerrdet); + if (em_ltlerrdet) { + /* Service Logical/Transport Layer Error(s) */ + if (em_ltlerrdet & REM_LTL_ERR_IMPSPEC) { + /* Implementation specific error reported */ + rio_read_config_32(rdev, + IDT_ISLTL_ADDRESS_CAP, ®val); + + pr_debug("RIO: %s Implementation Specific LTL errors" \ + " 0x%x @(0x%x)\n", + rio_name(rdev), em_ltlerrdet, regval); + + /* Clear implementation specific address capture CSR */ + rio_write_config_32(rdev, IDT_ISLTL_ADDRESS_CAP, 0); + + } + } + + rio_read_config_32(rdev, + rdev->em_efptr + RIO_EM_PN_ERR_DETECT(portnum), &em_perrdet); + if (em_perrdet) { + /* Service Port-Level Error(s) */ + if (em_perrdet & REM_PED_IMPL_SPEC) { + /* Implementation Specific port error reported */ + + /* Get IS errors reported */ + rio_read_config_32(rdev, + IDT_PORT_ISERR_DET(portnum), ®val); + + pr_debug("RIO: %s Implementation Specific Port" \ + " errors 0x%x\n", rio_name(rdev), regval); + + /* Clear all implementation specific events */ + rio_write_config_32(rdev, + IDT_PORT_ISERR_DET(portnum), 0); + } + } + + return 0; +} + +static ssize_t +idtg2_show_errlog(struct device *dev, struct device_attribute *attr, char *buf) +{ + struct rio_dev *rdev = to_rio_dev(dev); + ssize_t len = 0; + u32 regval; + + while (!rio_read_config_32(rdev, IDT_ERR_RD, ®val)) { + if (!regval) /* 0 = end of log */ + break; + len += snprintf(buf + len, PAGE_SIZE - len, + "%08x\n", regval); + if (len >= (PAGE_SIZE - 10)) + break; + } + + return len; +} + +static DEVICE_ATTR(errlog, S_IRUGO, idtg2_show_errlog, NULL); + +static int idtg2_sysfs(struct rio_dev *rdev, bool create) +{ + struct device *dev = &rdev->dev; + int err = 0; + + if (create) { + /* Initialize sysfs entries */ + err = device_create_file(dev, &dev_attr_errlog); + if (err) + dev_err(dev, "Unable create sysfs errlog file\n"); + } else + device_remove_file(dev, &dev_attr_errlog); + + return err; +} + +static struct rio_switch_ops idtg2_switch_ops = { + .owner = THIS_MODULE, + .add_entry = idtg2_route_add_entry, + .get_entry = idtg2_route_get_entry, + .clr_table = idtg2_route_clr_table, + .set_domain = idtg2_set_domain, + .get_domain = idtg2_get_domain, + .em_init = idtg2_em_init, + .em_handle = idtg2_em_handler, +}; + +static int idtg2_probe(struct rio_dev *rdev, const struct rio_device_id *id) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + + spin_lock(&rdev->rswitch->lock); + + if (rdev->rswitch->ops) { + spin_unlock(&rdev->rswitch->lock); + return -EINVAL; + } + + rdev->rswitch->ops = &idtg2_switch_ops; + + if (rdev->do_enum) { + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, + RIO_STD_RTE_DEFAULT_PORT, IDT_NO_ROUTE); + } + + /* Create device-specific sysfs attributes */ + idtg2_sysfs(rdev, true); + + spin_unlock(&rdev->rswitch->lock); + return 0; +} + +static void idtg2_remove(struct rio_dev *rdev) +{ + pr_debug("RIO: %s for %s\n", __func__, 
rio_name(rdev)); + spin_lock(&rdev->rswitch->lock); + if (rdev->rswitch->ops != &idtg2_switch_ops) { + spin_unlock(&rdev->rswitch->lock); + return; + } + rdev->rswitch->ops = NULL; + + /* Remove device-specific sysfs attributes */ + idtg2_sysfs(rdev, false); + + spin_unlock(&rdev->rswitch->lock); +} + +static struct rio_device_id idtg2_id_table[] = { + {RIO_DEVICE(RIO_DID_IDTCPS1848, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS1616, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTVPS1616, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTSPS1616, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS1432, RIO_VID_IDT)}, + { 0, } /* terminate list */ +}; + +static struct rio_driver idtg2_driver = { + .name = "idt_gen2", + .id_table = idtg2_id_table, + .probe = idtg2_probe, + .remove = idtg2_remove, +}; + +static int __init idtg2_init(void) +{ + return rio_register_driver(&idtg2_driver); +} + +static void __exit idtg2_exit(void) +{ + pr_debug("RIO: %s\n", __func__); + rio_unregister_driver(&idtg2_driver); + pr_debug("RIO: %s done\n", __func__); +} + +device_initcall(idtg2_init); +module_exit(idtg2_exit); + +MODULE_DESCRIPTION("IDT CPS Gen.2 Serial RapidIO switch family driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/idtcps.c b/drivers/rapidio/switches/idtcps.c new file mode 100644 index 000000000..7fbb60d31 --- /dev/null +++ b/drivers/rapidio/switches/idtcps.c @@ -0,0 +1,203 @@ +/* + * IDT CPS RapidIO switches support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/module.h>
+#include "../rio.h"
+
+#define CPS_DEFAULT_ROUTE 0xde
+#define CPS_NO_ROUTE 0xdf
+
+#define IDTCPS_RIO_DOMAIN 0xf20020
+
+static int
+idtcps_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ u32 result;
+
+ if (route_port == RIO_INVALID_ROUTE)
+ route_port = CPS_DEFAULT_ROUTE;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+ result = (0xffffff00 & result) | (u32)route_port;
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR, result);
+ }
+
+ return 0;
+}
+
+static int
+idtcps_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ u32 result;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR, route_destid);
+
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR, &result);
+
+ if (CPS_DEFAULT_ROUTE == (u8)result ||
+ CPS_NO_ROUTE == (u8)result)
+ *route_port = RIO_INVALID_ROUTE;
+ else
+ *route_port = (u8)result;
+ }
+
+ return 0;
+}
+
+static int
+idtcps_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 i;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ for (i = 0x80000000; i <= 0x800000ff;) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_DESTID_SEL_CSR, i);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ RIO_STD_RTE_CONF_PORT_SEL_CSR,
+ (CPS_DEFAULT_ROUTE << 24) |
+ (CPS_DEFAULT_ROUTE << 16) |
+ (CPS_DEFAULT_ROUTE << 8) | CPS_DEFAULT_ROUTE);
+ i += 4;
+ }
+ }
+
+ return 0;
+}
+
+static int
+idtcps_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 sw_domain)
+{
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ IDTCPS_RIO_DOMAIN, (u32)sw_domain);
+ return 0;
+}
+
+static int
+idtcps_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 *sw_domain)
+{
+ u32 regval;
+
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ IDTCPS_RIO_DOMAIN, &regval);
+
+ *sw_domain = (u8)(regval & 0xff);
+
+ return 0;
+}
+
+static struct rio_switch_ops idtcps_switch_ops = {
+ .owner = THIS_MODULE,
+ .add_entry = idtcps_route_add_entry,
+ .get_entry = idtcps_route_get_entry,
+ .clr_table = idtcps_route_clr_table,
+ .set_domain = idtcps_set_domain,
+ .get_domain = idtcps_get_domain,
+ .em_init = NULL,
+ .em_handle = NULL,
+};
+
+static int idtcps_probe(struct rio_dev *rdev, const struct rio_device_id *id)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+
+ spin_lock(&rdev->rswitch->lock);
+
+ if (rdev->rswitch->ops) {
+ spin_unlock(&rdev->rswitch->lock);
+ return -EINVAL;
+ }
+
+ rdev->rswitch->ops = &idtcps_switch_ops;
+
+ if (rdev->do_enum) {
+ /* set TVAL = ~50us */
+ rio_write_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x8e << 8);
+ /* Ensure that default routing is disabled on startup */
+ rio_write_config_32(rdev,
+ RIO_STD_RTE_DEFAULT_PORT, CPS_NO_ROUTE);
+ }
+
+ spin_unlock(&rdev->rswitch->lock);
+ return 0;
+}
+
+static void idtcps_remove(struct rio_dev *rdev)
+{
+ pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev));
+
spin_lock(&rdev->rswitch->lock); + if (rdev->rswitch->ops != &idtcps_switch_ops) { + spin_unlock(&rdev->rswitch->lock); + return; + } + rdev->rswitch->ops = NULL; + spin_unlock(&rdev->rswitch->lock); +} + +static struct rio_device_id idtcps_id_table[] = { + {RIO_DEVICE(RIO_DID_IDTCPS6Q, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS8, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS10Q, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS12, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDTCPS16, RIO_VID_IDT)}, + {RIO_DEVICE(RIO_DID_IDT70K200, RIO_VID_IDT)}, + { 0, } /* terminate list */ +}; + +static struct rio_driver idtcps_driver = { + .name = "idtcps", + .id_table = idtcps_id_table, + .probe = idtcps_probe, + .remove = idtcps_remove, +}; + +static int __init idtcps_init(void) +{ + return rio_register_driver(&idtcps_driver); +} + +static void __exit idtcps_exit(void) +{ + rio_unregister_driver(&idtcps_driver); +} + +device_initcall(idtcps_init); +module_exit(idtcps_exit); + +MODULE_DESCRIPTION("IDT CPS Gen.1 Serial RapidIO switch family driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/tsi568.c b/drivers/rapidio/switches/tsi568.c new file mode 100644 index 000000000..8a43561b9 --- /dev/null +++ b/drivers/rapidio/switches/tsi568.c @@ -0,0 +1,199 @@ +/* + * RapidIO Tsi568 switch support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine + * - Added EM support + * - Modified switch operations initialization. + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include "../rio.h"
+
+/* Global (broadcast) route registers */
+#define SPBC_ROUTE_CFG_DESTID 0x10070
+#define SPBC_ROUTE_CFG_PORT 0x10074
+
+/* Per port route registers */
+#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
+#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
+
+#define TSI568_SP_MODE(n) (0x11004 + 0x100*n)
+#define TSI568_SP_MODE_PW_DIS 0x08000000
+
+static int
+tsi568_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_DESTID, route_destid);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_PORT, route_port);
+ } else {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_DESTID(table),
+ route_destid);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_PORT(table), route_port);
+ }
+
+ udelay(10);
+
+ return 0;
+}
+
+static int
+tsi568_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ int ret = 0;
+ u32 result;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_DESTID, route_destid);
+ rio_mport_read_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_PORT, &result);
+ } else {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_DESTID(table),
+ route_destid);
+ rio_mport_read_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_PORT(table), &result);
+ }
+
+ *route_port = result;
+ if (*route_port > 15)
+ ret = -1;
+
+ return ret;
+}
+
+static int
+tsi568_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 route_idx;
+ u32 lut_size;
+
+ lut_size = (mport->sys_size) ?
0x1ff : 0xff; + + if (table == RIO_GLOBAL_TABLE) { + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_DESTID, 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPBC_ROUTE_CFG_PORT, + RIO_INVALID_ROUTE); + } else { + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_DESTID(table), + 0x80000000); + for (route_idx = 0; route_idx <= lut_size; route_idx++) + rio_mport_write_config_32(mport, destid, hopcount, + SPP_ROUTE_CFG_PORT(table), + RIO_INVALID_ROUTE); + } + + return 0; +} + +static int +tsi568_em_init(struct rio_dev *rdev) +{ + u32 regval; + int portnum; + + pr_debug("TSI568 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount); + + /* Make sure that Port-Writes are disabled (for all ports) */ + for (portnum = 0; + portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) { + rio_read_config_32(rdev, TSI568_SP_MODE(portnum), ®val); + rio_write_config_32(rdev, TSI568_SP_MODE(portnum), + regval | TSI568_SP_MODE_PW_DIS); + } + + return 0; +} + +static struct rio_switch_ops tsi568_switch_ops = { + .owner = THIS_MODULE, + .add_entry = tsi568_route_add_entry, + .get_entry = tsi568_route_get_entry, + .clr_table = tsi568_route_clr_table, + .set_domain = NULL, + .get_domain = NULL, + .em_init = tsi568_em_init, + .em_handle = NULL, +}; + +static int tsi568_probe(struct rio_dev *rdev, const struct rio_device_id *id) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + + spin_lock(&rdev->rswitch->lock); + + if (rdev->rswitch->ops) { + spin_unlock(&rdev->rswitch->lock); + return -EINVAL; + } + + rdev->rswitch->ops = &tsi568_switch_ops; + spin_unlock(&rdev->rswitch->lock); + return 0; +} + +static void tsi568_remove(struct rio_dev *rdev) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + spin_lock(&rdev->rswitch->lock); + if (rdev->rswitch->ops != &tsi568_switch_ops) { + spin_unlock(&rdev->rswitch->lock); + return; + } + rdev->rswitch->ops = NULL; + spin_unlock(&rdev->rswitch->lock); +} + +static struct rio_device_id tsi568_id_table[] = { + {RIO_DEVICE(RIO_DID_TSI568, RIO_VID_TUNDRA)}, + { 0, } /* terminate list */ +}; + +static struct rio_driver tsi568_driver = { + .name = "tsi568", + .id_table = tsi568_id_table, + .probe = tsi568_probe, + .remove = tsi568_remove, +}; + +static int __init tsi568_init(void) +{ + return rio_register_driver(&tsi568_driver); +} + +static void __exit tsi568_exit(void) +{ + rio_unregister_driver(&tsi568_driver); +} + +device_initcall(tsi568_init); +module_exit(tsi568_exit); + +MODULE_DESCRIPTION("IDT Tsi568 Serial RapidIO switch driver"); +MODULE_AUTHOR("Integrated Device Technology, Inc."); +MODULE_LICENSE("GPL"); diff --git a/drivers/rapidio/switches/tsi57x.c b/drivers/rapidio/switches/tsi57x.c new file mode 100644 index 000000000..42c8b014f --- /dev/null +++ b/drivers/rapidio/switches/tsi57x.c @@ -0,0 +1,371 @@ +/* + * RapidIO Tsi57x switch family support + * + * Copyright 2009-2010 Integrated Device Technology, Inc. + * Alexandre Bounine + * - Added EM support + * - Modified switch operations initialization. + * + * Copyright 2005 MontaVista Software, Inc. + * Matt Porter + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. 
+ */
+
+#include <linux/rio.h>
+#include <linux/rio_drv.h>
+#include <linux/rio_ids.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include "../rio.h"
+
+/* Global (broadcast) route registers */
+#define SPBC_ROUTE_CFG_DESTID 0x10070
+#define SPBC_ROUTE_CFG_PORT 0x10074
+
+/* Per port route registers */
+#define SPP_ROUTE_CFG_DESTID(n) (0x11070 + 0x100*n)
+#define SPP_ROUTE_CFG_PORT(n) (0x11074 + 0x100*n)
+
+#define TSI578_SP_MODE(n) (0x11004 + n*0x100)
+#define TSI578_SP_MODE_GLBL 0x10004
+#define TSI578_SP_MODE_PW_DIS 0x08000000
+#define TSI578_SP_MODE_LUT_512 0x01000000
+
+#define TSI578_SP_CTL_INDEP(n) (0x13004 + n*0x100)
+#define TSI578_SP_LUT_PEINF(n) (0x13010 + n*0x100)
+#define TSI578_SP_CS_TX(n) (0x13014 + n*0x100)
+#define TSI578_SP_INT_STATUS(n) (0x13018 + n*0x100)
+
+#define TSI578_GLBL_ROUTE_BASE 0x10078
+
+static int
+tsi57x_route_add_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 route_port)
+{
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_DESTID, route_destid);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_PORT, route_port);
+ } else {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_DESTID(table), route_destid);
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_PORT(table), route_port);
+ }
+
+ udelay(10);
+
+ return 0;
+}
+
+static int
+tsi57x_route_get_entry(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table, u16 route_destid, u8 *route_port)
+{
+ int ret = 0;
+ u32 result;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ /* Use local RT of the ingress port to avoid possible
+ race condition */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ RIO_SWP_INFO_CAR, &result);
+ table = (result & RIO_SWP_INFO_PORT_NUM_MASK);
+ }
+
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_DESTID(table), route_destid);
+ rio_mport_read_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_PORT(table), &result);
+
+ *route_port = (u8)result;
+ if (*route_port > 15)
+ ret = -1;
+
+ return ret;
+}
+
+static int
+tsi57x_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u16 table)
+{
+ u32 route_idx;
+ u32 lut_size;
+
+ lut_size = (mport->sys_size) ?
0x1ff : 0xff;
+
+ if (table == RIO_GLOBAL_TABLE) {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_DESTID, 0x80000000);
+ for (route_idx = 0; route_idx <= lut_size; route_idx++)
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPBC_ROUTE_CFG_PORT,
+ RIO_INVALID_ROUTE);
+ } else {
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_DESTID(table), 0x80000000);
+ for (route_idx = 0; route_idx <= lut_size; route_idx++)
+ rio_mport_write_config_32(mport, destid, hopcount,
+ SPP_ROUTE_CFG_PORT(table), RIO_INVALID_ROUTE);
+ }
+
+ return 0;
+}
+
+static int
+tsi57x_set_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 sw_domain)
+{
+ u32 regval;
+
+ /*
+ * Switch domain configuration operates only at global level
+ */
+
+ /* Turn off flat (LUT_512) mode */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI578_SP_MODE_GLBL, &regval);
+ rio_mport_write_config_32(mport, destid, hopcount, TSI578_SP_MODE_GLBL,
+ regval & ~TSI578_SP_MODE_LUT_512);
+ /* Set switch domain base */
+ rio_mport_write_config_32(mport, destid, hopcount,
+ TSI578_GLBL_ROUTE_BASE,
+ (u32)(sw_domain << 24));
+ return 0;
+}
+
+static int
+tsi57x_get_domain(struct rio_mport *mport, u16 destid, u8 hopcount,
+ u8 *sw_domain)
+{
+ u32 regval;
+
+ /*
+ * Switch domain configuration operates only at global level
+ */
+ rio_mport_read_config_32(mport, destid, hopcount,
+ TSI578_GLBL_ROUTE_BASE, &regval);
+
+ *sw_domain = (u8)(regval >> 24);
+
+ return 0;
+}
+
+static int
+tsi57x_em_init(struct rio_dev *rdev)
+{
+ u32 regval;
+ int portnum;
+
+ pr_debug("TSI578 %s [%d:%d]\n", __func__, rdev->destid, rdev->hopcount);
+
+ for (portnum = 0;
+ portnum < RIO_GET_TOTAL_PORTS(rdev->swpinfo); portnum++) {
+ /* Make sure that Port-Writes are enabled (for all ports) */
+ rio_read_config_32(rdev,
+ TSI578_SP_MODE(portnum), &regval);
+ rio_write_config_32(rdev,
+ TSI578_SP_MODE(portnum),
+ regval & ~TSI578_SP_MODE_PW_DIS);
+
+ /* Clear all pending interrupts */
+ rio_read_config_32(rdev,
+ rdev->phys_efptr +
+ RIO_PORT_N_ERR_STS_CSR(portnum),
+ &regval);
+ rio_write_config_32(rdev,
+ rdev->phys_efptr +
+ RIO_PORT_N_ERR_STS_CSR(portnum),
+ regval & 0x07120214);
+
+ rio_read_config_32(rdev,
+ TSI578_SP_INT_STATUS(portnum), &regval);
+ rio_write_config_32(rdev,
+ TSI578_SP_INT_STATUS(portnum),
+ regval & 0x000700bd);
+
+ /* Enable all interrupts to allow ports to send a port-write */
+ rio_read_config_32(rdev,
+ TSI578_SP_CTL_INDEP(portnum), &regval);
+ rio_write_config_32(rdev,
+ TSI578_SP_CTL_INDEP(portnum),
+ regval | 0x000b0000);
+
+ /* Skip next (odd) port if the current port is in x4 mode */
+ rio_read_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ &regval);
+ if ((regval & RIO_PORT_N_CTL_PWIDTH) == RIO_PORT_N_CTL_PWIDTH_4)
+ portnum++;
+ }
+
+ /* set TVAL = ~50us */
+ rio_write_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_LINKTO_CTL_CSR, 0x9a << 8);
+
+ return 0;
+}
+
+static int
+tsi57x_em_handler(struct rio_dev *rdev, u8 portnum)
+{
+ struct rio_mport *mport = rdev->net->hport;
+ u32 intstat, err_status;
+ int sendcount, checkcount;
+ u8 route_port;
+ u32 regval;
+
+ rio_read_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_N_ERR_STS_CSR(portnum),
+ &err_status);
+
+ if ((err_status & RIO_PORT_N_ERR_STS_PORT_OK) &&
+ (err_status & (RIO_PORT_N_ERR_STS_PW_OUT_ES |
+ RIO_PORT_N_ERR_STS_PW_INP_ES))) {
+ /* Remove any queued packets by locking/unlocking port */
+ rio_read_config_32(rdev,
+ rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum),
+ &regval);
+ if (!(regval & RIO_PORT_N_CTL_LOCKOUT))
{ + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + regval | RIO_PORT_N_CTL_LOCKOUT); + udelay(50); + rio_write_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_CTL_CSR(portnum), + regval); + } + + /* Read from link maintenance response register to clear + * valid bit + */ + rio_read_config_32(rdev, + rdev->phys_efptr + RIO_PORT_N_MNT_RSP_CSR(portnum), + ®val); + + /* Send a Packet-Not-Accepted/Link-Request-Input-Status control + * symbol to recover from IES/OES + */ + sendcount = 3; + while (sendcount) { + rio_write_config_32(rdev, + TSI578_SP_CS_TX(portnum), 0x40fc8000); + checkcount = 3; + while (checkcount--) { + udelay(50); + rio_read_config_32(rdev, + rdev->phys_efptr + + RIO_PORT_N_MNT_RSP_CSR(portnum), + ®val); + if (regval & RIO_PORT_N_MNT_RSP_RVAL) + goto exit_es; + } + + sendcount--; + } + } + +exit_es: + /* Clear implementation specific error status bits */ + rio_read_config_32(rdev, TSI578_SP_INT_STATUS(portnum), &intstat); + pr_debug("TSI578[%x:%x] SP%d_INT_STATUS=0x%08x\n", + rdev->destid, rdev->hopcount, portnum, intstat); + + if (intstat & 0x10000) { + rio_read_config_32(rdev, + TSI578_SP_LUT_PEINF(portnum), ®val); + regval = (mport->sys_size) ? (regval >> 16) : (regval >> 24); + route_port = rdev->rswitch->route_table[regval]; + pr_debug("RIO: TSI578[%s] P%d LUT Parity Error (destID=%d)\n", + rio_name(rdev), portnum, regval); + tsi57x_route_add_entry(mport, rdev->destid, rdev->hopcount, + RIO_GLOBAL_TABLE, regval, route_port); + } + + rio_write_config_32(rdev, TSI578_SP_INT_STATUS(portnum), + intstat & 0x000700bd); + + return 0; +} + +static struct rio_switch_ops tsi57x_switch_ops = { + .owner = THIS_MODULE, + .add_entry = tsi57x_route_add_entry, + .get_entry = tsi57x_route_get_entry, + .clr_table = tsi57x_route_clr_table, + .set_domain = tsi57x_set_domain, + .get_domain = tsi57x_get_domain, + .em_init = tsi57x_em_init, + .em_handle = tsi57x_em_handler, +}; + +static int tsi57x_probe(struct rio_dev *rdev, const struct rio_device_id *id) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + + spin_lock(&rdev->rswitch->lock); + + if (rdev->rswitch->ops) { + spin_unlock(&rdev->rswitch->lock); + return -EINVAL; + } + rdev->rswitch->ops = &tsi57x_switch_ops; + + if (rdev->do_enum) { + /* Ensure that default routing is disabled on startup */ + rio_write_config_32(rdev, RIO_STD_RTE_DEFAULT_PORT, + RIO_INVALID_ROUTE); + } + + spin_unlock(&rdev->rswitch->lock); + return 0; +} + +static void tsi57x_remove(struct rio_dev *rdev) +{ + pr_debug("RIO: %s for %s\n", __func__, rio_name(rdev)); + spin_lock(&rdev->rswitch->lock); + if (rdev->rswitch->ops != &tsi57x_switch_ops) { + spin_unlock(&rdev->rswitch->lock); + return; + } + rdev->rswitch->ops = NULL; + spin_unlock(&rdev->rswitch->lock); +} + +static struct rio_device_id tsi57x_id_table[] = { + {RIO_DEVICE(RIO_DID_TSI572, RIO_VID_TUNDRA)}, + {RIO_DEVICE(RIO_DID_TSI574, RIO_VID_TUNDRA)}, + {RIO_DEVICE(RIO_DID_TSI577, RIO_VID_TUNDRA)}, + {RIO_DEVICE(RIO_DID_TSI578, RIO_VID_TUNDRA)}, + { 0, } /* terminate list */ +}; + +static struct rio_driver tsi57x_driver = { + .name = "tsi57x", + .id_table = tsi57x_id_table, + .probe = tsi57x_probe, + .remove = tsi57x_remove, +}; + +static int __init tsi57x_init(void) +{ + return rio_register_driver(&tsi57x_driver); +} + +static void __exit tsi57x_exit(void) +{ + rio_unregister_driver(&tsi57x_driver); +} + +device_initcall(tsi57x_init); +module_exit(tsi57x_exit); + +MODULE_DESCRIPTION("IDT Tsi57x Serial RapidIO switch family driver"); +MODULE_AUTHOR("Integrated 
Device Technology, Inc.");
+MODULE_LICENSE("GPL");
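
All four switch drivers above share one registration pattern: probe() takes rdev->rswitch->lock, refuses the device if another driver has already installed its rio_switch_ops, and otherwise publishes its ops table; remove() undoes this under the same lock. A condensed sketch of that pattern follows; the my_*/MY_* names and IDs are hypothetical placeholders, not a real device:

#include <linux/module.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include "../rio.h"

#define MY_SWITCH_VID 0xffff	/* hypothetical vendor/device IDs */
#define MY_SWITCH_DID 0xffff

static int my_route_add_entry(struct rio_mport *mport, u16 destid,
			      u8 hopcount, u16 table, u16 route_destid,
			      u8 route_port)
{
	/* program one routing LUT entry through maintenance writes */
	return 0;
}

static struct rio_switch_ops my_switch_ops = {
	.owner = THIS_MODULE,
	.add_entry = my_route_add_entry,
	/* .get_entry/.clr_table/.set_domain/.get_domain/.em_init/.em_handle
	 * may be left NULL when the device does not need them */
};

static int my_probe(struct rio_dev *rdev, const struct rio_device_id *id)
{
	spin_lock(&rdev->rswitch->lock);
	if (rdev->rswitch->ops) {	/* another driver claimed this switch */
		spin_unlock(&rdev->rswitch->lock);
		return -EINVAL;
	}
	rdev->rswitch->ops = &my_switch_ops;
	spin_unlock(&rdev->rswitch->lock);
	return 0;
}

static void my_remove(struct rio_dev *rdev)
{
	spin_lock(&rdev->rswitch->lock);
	if (rdev->rswitch->ops == &my_switch_ops)
		rdev->rswitch->ops = NULL;
	spin_unlock(&rdev->rswitch->lock);
}

static struct rio_device_id my_id_table[] = {
	{RIO_DEVICE(MY_SWITCH_DID, MY_SWITCH_VID)},
	{ 0, }	/* terminate list */
};

static struct rio_driver my_driver = {
	.name = "my_switch",
	.id_table = my_id_table,
	.probe = my_probe,
	.remove = my_remove,
};

static int __init my_init(void)
{
	return rio_register_driver(&my_driver);
}

static void __exit my_exit(void)
{
	rio_unregister_driver(&my_driver);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

The probe-time ownership check is what lets several switch drivers be built in at once: whichever driver matches the device first installs its ops, and later matches back off with -EINVAL.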