Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/Kconfig | 672
-rw-r--r--  drivers/spi/Makefile | 91
-rw-r--r--  drivers/spi/spi-adi-v3.c | 984
-rw-r--r--  drivers/spi/spi-altera.c | 294
-rw-r--r--  drivers/spi/spi-ath79.c | 317
-rw-r--r--  drivers/spi/spi-atmel.c | 1540
-rw-r--r--  drivers/spi/spi-au1550.c | 1002
-rw-r--r--  drivers/spi/spi-bcm2835.c | 517
-rw-r--r--  drivers/spi/spi-bcm53xx.c | 299
-rw-r--r--  drivers/spi/spi-bcm53xx.h | 72
-rw-r--r--  drivers/spi/spi-bcm63xx-hsspi.c | 473
-rw-r--r--  drivers/spi/spi-bcm63xx.c | 480
-rw-r--r--  drivers/spi/spi-bfin-sport.c | 929
-rw-r--r--  drivers/spi/spi-bfin5xx.c | 1473
-rw-r--r--  drivers/spi/spi-bitbang-txrx.h | 107
-rw-r--r--  drivers/spi/spi-bitbang.c | 475
-rw-r--r--  drivers/spi/spi-butterfly.c | 351
-rw-r--r--  drivers/spi/spi-cadence.c | 692
-rw-r--r--  drivers/spi/spi-clps711x.c | 214
-rw-r--r--  drivers/spi/spi-coldfire-qspi.c | 529
-rw-r--r--  drivers/spi/spi-davinci.c | 1126
-rw-r--r--  drivers/spi/spi-dln2.c | 881
-rw-r--r--  drivers/spi/spi-dw-mid.c | 330
-rw-r--r--  drivers/spi/spi-dw-mmio.c | 145
-rw-r--r--  drivers/spi/spi-dw-pci.c | 164
-rw-r--r--  drivers/spi/spi-dw.c | 623
-rw-r--r--  drivers/spi/spi-dw.h | 212
-rw-r--r--  drivers/spi/spi-efm32.c | 505
-rw-r--r--  drivers/spi/spi-ep93xx.c | 976
-rw-r--r--  drivers/spi/spi-falcon.c | 439
-rw-r--r--  drivers/spi/spi-fsl-cpm.c | 406
-rw-r--r--  drivers/spi/spi-fsl-cpm.h | 43
-rw-r--r--  drivers/spi/spi-fsl-dspi.c | 643
-rw-r--r--  drivers/spi/spi-fsl-espi.c | 880
-rw-r--r--  drivers/spi/spi-fsl-lib.c | 183
-rw-r--r--  drivers/spi/spi-fsl-lib.h | 125
-rw-r--r--  drivers/spi/spi-fsl-spi.c | 965
-rw-r--r--  drivers/spi/spi-fsl-spi.h | 72
-rw-r--r--  drivers/spi/spi-gpio.c | 547
-rw-r--r--  drivers/spi/spi-img-spfi.c | 759
-rw-r--r--  drivers/spi/spi-imx.c | 1249
-rw-r--r--  drivers/spi/spi-lm70llp.c | 344
-rw-r--r--  drivers/spi/spi-meson-spifc.c | 462
-rw-r--r--  drivers/spi/spi-mpc512x-psc.c | 610
-rw-r--r--  drivers/spi/spi-mpc52xx-psc.c | 518
-rw-r--r--  drivers/spi/spi-mpc52xx.c | 551
-rw-r--r--  drivers/spi/spi-mxs.c | 583
-rw-r--r--  drivers/spi/spi-nuc900.c | 433
-rw-r--r--  drivers/spi/spi-oc-tiny.c | 363
-rw-r--r--  drivers/spi/spi-octeon.c | 260
-rw-r--r--  drivers/spi/spi-omap-100k.c | 512
-rw-r--r--  drivers/spi/spi-omap-uwire.c | 561
-rw-r--r--  drivers/spi/spi-omap2-mcspi.c | 1535
-rw-r--r--  drivers/spi/spi-orion.c | 572
-rw-r--r--  drivers/spi/spi-pl022.c | 2492
-rw-r--r--  drivers/spi/spi-ppc4xx.c | 585
-rw-r--r--  drivers/spi/spi-pxa2xx-dma.c | 368
-rw-r--r--  drivers/spi/spi-pxa2xx-pci.c | 221
-rw-r--r--  drivers/spi/spi-pxa2xx-pxadma.c | 487
-rw-r--r--  drivers/spi/spi-pxa2xx.c | 1600
-rw-r--r--  drivers/spi/spi-pxa2xx.h | 219
-rw-r--r--  drivers/spi/spi-qup.c | 1050
-rw-r--r--  drivers/spi/spi-rockchip.c | 890
-rw-r--r--  drivers/spi/spi-rspi.c | 1326
-rw-r--r--  drivers/spi/spi-s3c24xx-fiq.S | 116
-rw-r--r--  drivers/spi/spi-s3c24xx-fiq.h | 26
-rw-r--r--  drivers/spi/spi-s3c24xx.c | 673
-rw-r--r--  drivers/spi/spi-s3c64xx.c | 1406
-rw-r--r--  drivers/spi/spi-sc18is602.c | 329
-rw-r--r--  drivers/spi/spi-sh-hspi.c | 321
-rw-r--r--  drivers/spi/spi-sh-msiof.c | 1291
-rw-r--r--  drivers/spi/spi-sh-sci.c | 197
-rw-r--r--  drivers/spi/spi-sh.c | 537
-rw-r--r--  drivers/spi/spi-sirf.c | 838
-rw-r--r--  drivers/spi/spi-st-ssc4.c | 504
-rw-r--r--  drivers/spi/spi-sun4i.c | 476
-rw-r--r--  drivers/spi/spi-sun6i.c | 482
-rw-r--r--  drivers/spi/spi-tegra114.c | 1237
-rw-r--r--  drivers/spi/spi-tegra20-sflash.c | 622
-rw-r--r--  drivers/spi/spi-tegra20-slink.c | 1238
-rw-r--r--  drivers/spi/spi-ti-qspi.c | 600
-rw-r--r--  drivers/spi/spi-tle62x0.c | 321
-rw-r--r--  drivers/spi/spi-topcliff-pch.c | 1754
-rw-r--r--  drivers/spi/spi-txx9.c | 444
-rw-r--r--  drivers/spi/spi-xcomm.c | 253
-rw-r--r--  drivers/spi/spi-xilinx.c | 509
-rw-r--r--  drivers/spi/spi-xtensa-xtfpga.c | 170
-rw-r--r--  drivers/spi/spi.c | 2408
-rw-r--r--  drivers/spi/spidev.c | 856
89 files changed, 56934 insertions, 0 deletions
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
new file mode 100644
index 000000000..72b059081
--- /dev/null
+++ b/drivers/spi/Kconfig
@@ -0,0 +1,672 @@
+#
+# SPI driver configuration
+#
+# NOTE: the reason this doesn't show SPI slave support is mostly that
+# nobody's needed a slave side API yet. The master-role API is not
+# fully appropriate there, so it'd need some thought to do well.
+#
+menuconfig SPI
+ bool "SPI support"
+ depends on HAS_IOMEM
+ help
+ The "Serial Peripheral Interface" is a low level synchronous
+ protocol. Chips that support SPI can have data transfer rates
+ up to several tens of Mbit/sec. Chips are addressed with a
+ controller and a chipselect. Most SPI slaves don't support
+ dynamic device discovery; some are even write-only or read-only.
+
+ SPI is widely used by microcontrollers to talk with sensors,
+ eeprom and flash memory, codecs and various other controller
+ chips, analog to digital (and d-to-a) converters, and more.
+ MMC and SD cards can be accessed using SPI protocol; and for
+ DataFlash cards used in MMC sockets, SPI must always be used.
+
+ SPI is one of a family of similar protocols using a four wire
+ interface (select, clock, data in, data out) including Microwire
+ (half duplex), SSP, SSI, and PSP. This driver framework should
+ work with most such devices and controllers.
+
+if SPI
+
+config SPI_DEBUG
+ bool "Debug support for SPI drivers"
+ depends on DEBUG_KERNEL
+ help
+ Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
+ sysfs, and debugfs support in SPI controller and protocol drivers.
+
+#
+# MASTER side ... talking to discrete SPI slave chips including microcontrollers
+#
+
+config SPI_MASTER
+# bool "SPI Master Support"
+ bool
+ default SPI
+ help
+ If your system has a master-capable SPI controller (which
+ provides the clock and chipselect), you can enable that
+ controller and the protocol drivers for the SPI slave chips
+ that are connected.
+
+if SPI_MASTER
+
+comment "SPI Master Controller Drivers"
+
+config SPI_ALTERA
+ tristate "Altera SPI Controller"
+ select SPI_BITBANG
+ help
+ This is the driver for the Altera SPI Controller.
+
+config SPI_ATH79
+ tristate "Atheros AR71XX/AR724X/AR913X SPI controller driver"
+ depends on ATH79 && GPIOLIB
+ select SPI_BITBANG
+ help
+ This enables support for the SPI controller present on the
+ Atheros AR71XX/AR724X/AR913X SoCs.
+
+config SPI_ATMEL
+ tristate "Atmel SPI Controller"
+ depends on HAS_DMA
+ depends on (ARCH_AT91 || AVR32 || COMPILE_TEST)
+ help
+ This selects a driver for the Atmel SPI Controller, present on
+ many AT32 (AVR32) and AT91 (ARM) chips.
+
+config SPI_BCM2835
+ tristate "BCM2835 SPI controller"
+ depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on GPIOLIB
+ help
+ This selects a driver for the Broadcom BCM2835 SPI master.
+
+ The BCM2835 contains two types of SPI master controller; the
+ "universal SPI master", and the regular SPI controller. This driver
+ is for the regular SPI controller. Slave mode operation is not
+ supported.
+
+config SPI_BFIN5XX
+ tristate "SPI controller driver for ADI Blackfin5xx"
+ depends on BLACKFIN && !BF60x
+ help
+ This is the SPI controller master driver for Blackfin 5xx processors.
+
+config SPI_ADI_V3
+ tristate "SPI controller v3 for ADI"
+ depends on BF60x
+ help
+ This is the SPI controller v3 master driver
+ found on Blackfin 60x processors.
+
+config SPI_BFIN_SPORT
+ tristate "SPI bus via Blackfin SPORT"
+ depends on BLACKFIN
+ help
+ Enable support for a SPI bus via the Blackfin SPORT peripheral.
+
+config SPI_AU1550
+ tristate "Au1550/Au1200/Au1300 SPI Controller"
+ depends on MIPS_ALCHEMY
+ select SPI_BITBANG
+ help
+ If you say yes to this option, support will be included for the
+ PSC SPI controller found on Au1550, Au1200 and Au1300 series.
+
+config SPI_BCM53XX
+ tristate "Broadcom BCM53xx SPI controller"
+ depends on ARCH_BCM_5301X
+ depends on BCMA_POSSIBLE
+ select BCMA
+ help
+ Enable support for the SPI controller on Broadcom BCM53xx ARM SoCs.
+
+config SPI_BCM63XX
+ tristate "Broadcom BCM63xx SPI controller"
+ depends on BCM63XX
+ help
+ Enable support for the SPI controller on the Broadcom BCM63xx SoCs.
+
+config SPI_BCM63XX_HSSPI
+ tristate "Broadcom BCM63XX HS SPI controller driver"
+ depends on BCM63XX || COMPILE_TEST
+ help
+ This enables support for the High Speed SPI controller present on
+ newer Broadcom BCM63XX SoCs.
+
+config SPI_BITBANG
+ tristate "Utilities for Bitbanging SPI masters"
+ help
+ With a few GPIO pins, your system can bitbang the SPI protocol.
+ Select this to get SPI support through I/O pins (GPIO, parallel
+ port, etc). Or, some systems' SPI master controller drivers use
+ this code to manage the per-word or per-transfer accesses to the
+ hardware shift registers.
+
+ This is library code, and is automatically selected by drivers that
+ need it. You only need to select this explicitly to support driver
+ modules that aren't part of this kernel tree.
+
+config SPI_BUTTERFLY
+ tristate "Parallel port adapter for AVR Butterfly (DEVELOPMENT)"
+ depends on PARPORT
+ select SPI_BITBANG
+ help
+ This uses a custom parallel port cable to connect to an AVR
+ Butterfly <http://www.atmel.com/products/avr/butterfly>, an
+ inexpensive battery powered microcontroller evaluation board.
+ This same cable can be used to flash new firmware.
+
+config SPI_CADENCE
+ tristate "Cadence SPI controller"
+ help
+ This selects the Cadence SPI controller master driver
+ used by Xilinx Zynq and ZynqMP.
+
+config SPI_CLPS711X
+ tristate "CLPS711X host SPI controller"
+ depends on ARCH_CLPS711X || COMPILE_TEST
+ help
+ This enables the dedicated general-purpose SPI/Microwire1-compatible
+ master mode interface (SSI1) on CLPS711X-based CPUs.
+
+config SPI_COLDFIRE_QSPI
+ tristate "Freescale Coldfire QSPI controller"
+ depends on (M520x || M523x || M5249 || M525x || M527x || M528x || M532x)
+ help
+ This enables support for the Coldfire QSPI controller in master
+ mode.
+
+config SPI_DAVINCI
+ tristate "Texas Instruments DaVinci/DA8x/OMAP-L/AM1x SoC SPI controller"
+ depends on ARCH_DAVINCI || ARCH_KEYSTONE
+ select SPI_BITBANG
+ help
+ SPI master controller for DaVinci/DA8x/OMAP-L/AM1x SPI modules.
+
+config SPI_DLN2
+ tristate "Diolan DLN-2 USB SPI adapter"
+ depends on MFD_DLN2
+ help
+ If you say yes to this option, support will be included for Diolan
+ DLN2, a USB to SPI interface.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi-dln2.
+
+config SPI_EFM32
+ tristate "EFM32 SPI controller"
+ depends on OF && ARM && (ARCH_EFM32 || COMPILE_TEST)
+ select SPI_BITBANG
+ help
+ Driver for the SPI controller found on Energy Micro's EFM32 SoCs.
+
+config SPI_EP93XX
+ tristate "Cirrus Logic EP93xx SPI controller"
+ depends on HAS_DMA
+ depends on ARCH_EP93XX || COMPILE_TEST
+ help
+ This enables using the Cirrus EP93xx SPI controller in master
+ mode.
+
+config SPI_FALCON
+ tristate "Falcon SPI controller support"
+ depends on SOC_FALCON
+ help
+ The external bus unit (EBU) found on the FALC-ON SoC has SPI
+ emulation that is designed for serial flash access. This driver
+ has only been tested with m25p80 type chips. The hardware has no
+ support for other types of SPI peripherals.
+
+config SPI_GPIO
+ tristate "GPIO-based bitbanging SPI Master"
+ depends on GPIOLIB
+ select SPI_BITBANG
+ help
+ This simple GPIO bitbanging SPI master uses the arch-neutral GPIO
+ interface to manage MOSI, MISO, SCK, and chipselect signals. SPI
+ slaves connected to a bus using this driver are configured as usual,
+ except that the spi_board_info.controller_data holds the GPIO number
+ for the chipselect used by this controller driver.
+
+ Note that this driver often won't achieve even 1 Mbit/sec speeds,
+ making it unusually slow for SPI. If your platform can inline
+ GPIO operations, you should be able to leverage that for better
+ speed with a custom version of this driver; see the source code.
+
+config SPI_IMG_SPFI
+ tristate "IMG SPFI controller"
+ depends on MIPS || COMPILE_TEST
+ help
+ This enables support for the SPFI master controller found on
+ IMG SoCs.
+
+config SPI_IMX
+ tristate "Freescale i.MX SPI controllers"
+ depends on ARCH_MXC || COMPILE_TEST
+ select SPI_BITBANG
+ help
+ This enables using the Freescale i.MX SPI controllers in master
+ mode.
+
+config SPI_LM70_LLP
+ tristate "Parallel port adapter for LM70 eval board (DEVELOPMENT)"
+ depends on PARPORT
+ select SPI_BITBANG
+ help
+ This driver supports the NS LM70 LLP Evaluation Board,
+ which interfaces to an LM70 temperature sensor using
+ a parallel port.
+
+config SPI_MPC52xx
+ tristate "Freescale MPC52xx SPI (non-PSC) controller support"
+ depends on PPC_MPC52xx
+ help
+ This driver supports the MPC52xx SPI controller in master SPI
+ mode.
+
+config SPI_MPC52xx_PSC
+ tristate "Freescale MPC52xx PSC SPI controller"
+ depends on PPC_MPC52xx
+ help
+ This enables using the Freescale MPC52xx Programmable Serial
+ Controller in master SPI mode.
+
+config SPI_MPC512x_PSC
+ tristate "Freescale MPC512x PSC SPI controller"
+ depends on PPC_MPC512x
+ help
+ This enables using the Freescale MPC5121 Programmable Serial
+ Controller in SPI master mode.
+
+config SPI_FSL_LIB
+ tristate
+ depends on OF
+
+config SPI_FSL_CPM
+ tristate
+ depends on FSL_SOC
+
+config SPI_FSL_SPI
+ tristate "Freescale SPI controller and Aeroflex Gaisler GRLIB SPI controller"
+ depends on OF
+ select SPI_FSL_LIB
+ select SPI_FSL_CPM if FSL_SOC
+ help
+ This enables using the Freescale SPI controllers in master mode.
+ MPC83xx platform uses the controller in cpu mode or CPM/QE mode.
+ MPC8569 uses the controller in QE mode, MPC8610 in cpu mode.
+ This also enables using the Aeroflex Gaisler GRLIB SPI controller in
+ master mode.
+
+config SPI_FSL_DSPI
+ tristate "Freescale DSPI controller"
+ select REGMAP_MMIO
+ depends on SOC_VF610 || SOC_LS1021A || COMPILE_TEST
+ help
+ This enables support for the Freescale DSPI controller in master
+ mode. The VF610 platform uses this controller.
+
+config SPI_FSL_ESPI
+ tristate "Freescale eSPI controller"
+ depends on FSL_SOC
+ select SPI_FSL_LIB
+ help
+ This enables using the Freescale eSPI controllers in master mode.
+ Starting with the MPC8536, 85xx platforms use this controller, as
+ do all P10xx, P20xx, P30xx, P40xx and P50xx parts.
+
+config SPI_MESON_SPIFC
+ tristate "Amlogic Meson SPIFC controller"
+ depends on ARCH_MESON || COMPILE_TEST
+ select REGMAP_MMIO
+ help
+ This enables master mode support for the SPIFC (SPI flash
+ controller) available in Amlogic Meson SoCs.
+
+config SPI_OC_TINY
+ tristate "OpenCores tiny SPI"
+ depends on GPIOLIB
+ select SPI_BITBANG
+ help
+ This is the driver for the OpenCores tiny SPI master controller.
+
+config SPI_OCTEON
+ tristate "Cavium OCTEON SPI controller"
+ depends on CAVIUM_OCTEON_SOC
+ help
+ SPI host driver for the hardware found on some Cavium OCTEON
+ SOCs.
+
+config SPI_OMAP_UWIRE
+ tristate "OMAP1 MicroWire"
+ depends on ARCH_OMAP1
+ select SPI_BITBANG
+ help
+ This hooks up to the MicroWire controller on OMAP1 chips.
+
+config SPI_OMAP24XX
+ tristate "McSPI driver for OMAP"
+ depends on HAS_DMA
+ depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ help
+ SPI master controller for OMAP24XX and later Multichannel SPI
+ (McSPI) modules.
+
+config SPI_TI_QSPI
+ tristate "DRA7xxx QSPI controller support"
+ depends on ARCH_OMAP2PLUS || COMPILE_TEST
+ help
+ QSPI master controller for DRA7xxx used for flash devices.
+ This device supports single, dual and quad read modes, while
+ it only supports single write mode.
+
+config SPI_OMAP_100K
+ tristate "OMAP SPI 100K"
+ depends on ARCH_OMAP850 || ARCH_OMAP730 || COMPILE_TEST
+ help
+ OMAP SPI 100K master controller for omap7xx boards.
+
+config SPI_ORION
+ tristate "Orion SPI master"
+ depends on PLAT_ORION || COMPILE_TEST
+ help
+ This enables using the SPI master controller on the Orion chips.
+
+config SPI_PL022
+ tristate "ARM AMBA PL022 SSP controller"
+ depends on ARM_AMBA
+ default y if MACH_U300
+ default y if ARCH_REALVIEW
+ default y if INTEGRATOR_IMPD1
+ default y if ARCH_VERSATILE
+ help
+ This selects the ARM(R) AMBA(R) PrimeCell PL022 SSP
+ controller. If you have an embedded system with an AMBA(R)
+ bus and a PL022 controller, say Y or M here.
+
+config SPI_PPC4xx
+ tristate "PPC4xx SPI Controller"
+ depends on PPC32 && 4xx
+ select SPI_BITBANG
+ help
+ This selects a driver for the PPC4xx SPI Controller.
+
+config SPI_PXA2XX_PXADMA
+ bool "PXA2xx SSP legacy PXA DMA API support"
+ depends on SPI_PXA2XX && ARCH_PXA
+ help
+ Enable PXA private legacy DMA API support. Note that this is
+ deprecated in favor of the generic DMA engine API.
+
+config SPI_PXA2XX_DMA
+ def_bool y
+ depends on SPI_PXA2XX && !SPI_PXA2XX_PXADMA
+
+config SPI_PXA2XX
+ tristate "PXA2xx SSP SPI master"
+ depends on (ARCH_PXA || PCI || ACPI)
+ select PXA_SSP if ARCH_PXA
+ help
+ This enables using a PXA2xx or Sodaville SSP port as a SPI master
+ controller. The driver can be configured to use any SSP port and
+ additional documentation can be found at Documentation/spi/pxa2xx.
+
+config SPI_PXA2XX_PCI
+ def_tristate SPI_PXA2XX && PCI && COMMON_CLK
+
+config SPI_ROCKCHIP
+ tristate "Rockchip SPI controller driver"
+ depends on ARM || ARM64 || AVR32 || HEXAGON || MIPS || SUPERH
+ help
+ This selects a driver for Rockchip SPI controller.
+
+ If you say yes to this option, support will be included for the
+ RK3066, RK3188 and RK3288 families of SPI controllers. The
+ Rockchip SPI controller supports DMA transfers and PIO mode.
+ The main use case for this controller is to use SPI flash as the
+ boot device.
+
+config SPI_RSPI
+ tristate "Renesas RSPI/QSPI controller"
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ help
+ SPI driver for Renesas RSPI and QSPI blocks.
+
+config SPI_QUP
+ tristate "Qualcomm SPI controller with QUP interface"
+ depends on ARCH_QCOM || (ARM && COMPILE_TEST)
+ help
+ The Qualcomm Universal Peripheral (QUP) core is an AHB slave that
+ provides a common data path (an output FIFO and an input FIFO)
+ for the serial peripheral interface (SPI) mini-core. In master
+ mode, SPI supports up to 50MHz, up to four chip selects, a
+ programmable data path from 4 to 32 bits, and numerous protocol
+ variants.
+
+ This driver can also be built as a module. If so, the module
+ will be called spi_qup.
+
+config SPI_S3C24XX
+ tristate "Samsung S3C24XX series SPI"
+ depends on ARCH_S3C24XX
+ select SPI_BITBANG
+ help
+ SPI driver for Samsung S3C24XX series ARM SoCs
+
+config SPI_S3C24XX_FIQ
+ bool "S3C24XX driver with FIQ pseudo-DMA"
+ depends on SPI_S3C24XX
+ select FIQ
+ help
+ Enable FIQ support for the S3C24XX SPI driver to provide pseudo
+ DMA by using the fast-interrupt request framework. This allows
+ the driver to get DMA-like performance when there are either
+ no free DMA channels, or when doing transfers that require both
+ TX and RX data paths.
+
+config SPI_S3C64XX
+ tristate "Samsung S3C64XX series type SPI"
+ depends on (PLAT_SAMSUNG || ARCH_EXYNOS)
+ help
+ SPI driver for Samsung S3C64XX and newer SoCs.
+
+config SPI_SC18IS602
+ tristate "NXP SC18IS602/602B/603 I2C to SPI bridge"
+ depends on I2C
+ help
+ SPI driver for NXP SC18IS602/602B/603 I2C to SPI bridge.
+
+config SPI_SH_MSIOF
+ tristate "SuperH MSIOF SPI controller"
+ depends on HAVE_CLK && HAS_DMA
+ depends on SUPERH || ARCH_SHMOBILE || COMPILE_TEST
+ help
+ SPI driver for SuperH and SH Mobile MSIOF blocks.
+
+config SPI_SH
+ tristate "SuperH SPI controller"
+ depends on SUPERH || COMPILE_TEST
+ help
+ SPI driver for SuperH SPI blocks.
+
+config SPI_SH_SCI
+ tristate "SuperH SCI SPI controller"
+ depends on SUPERH
+ select SPI_BITBANG
+ help
+ SPI driver for SuperH SCI blocks.
+
+config SPI_SH_HSPI
+ tristate "SuperH HSPI controller"
+ depends on ARCH_SHMOBILE || COMPILE_TEST
+ help
+ SPI driver for SuperH HSPI blocks.
+
+config SPI_SIRF
+ tristate "CSR SiRFprimaII SPI controller"
+ depends on SIRF_DMA
+ select SPI_BITBANG
+ help
+ SPI driver for CSR SiRFprimaII SoCs
+
+config SPI_ST_SSC4
+ tristate "STMicroelectronics SPI SSC-based driver"
+ depends on ARCH_STI
+ help
+ SPI support for STMicroelectronics SoCs. If you say yes to
+ this option, support will be included for the SSC-driven SPI.
+
+config SPI_SUN4I
+ tristate "Allwinner A10 SoCs SPI controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ help
+ SPI driver for Allwinner sun4i, sun5i and sun7i SoCs
+
+config SPI_SUN6I
+ tristate "Allwinner A31 SPI controller"
+ depends on ARCH_SUNXI || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ This enables using the SPI controller on the Allwinner A31 SoCs.
+
+config SPI_MXS
+ tristate "Freescale MXS SPI controller"
+ depends on ARCH_MXS
+ select STMP_DEVICE
+ help
+ SPI driver for Freescale MXS devices.
+
+config SPI_TEGRA114
+ tristate "NVIDIA Tegra114 SPI Controller"
+ depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
+ depends on RESET_CONTROLLER && HAS_DMA
+ help
+ SPI driver for the NVIDIA Tegra114 SPI Controller interface. This
+ controller is different from the SPI controller on older Tegra SoCs,
+ and its register interface has changed as well.
+
+config SPI_TEGRA20_SFLASH
+ tristate "Nvidia Tegra20 Serial flash Controller"
+ depends on ARCH_TEGRA || COMPILE_TEST
+ depends on RESET_CONTROLLER
+ help
+ SPI driver for the Nvidia Tegra20 Serial flash Controller interface.
+ The main use case for this controller is to use SPI flash as the
+ boot device.
+
+config SPI_TEGRA20_SLINK
+ tristate "Nvidia Tegra20/Tegra30 SLINK Controller"
+ depends on (ARCH_TEGRA && TEGRA20_APB_DMA) || COMPILE_TEST
+ depends on RESET_CONTROLLER && HAS_DMA
+ help
+ SPI driver for Nvidia Tegra20/Tegra30 SLINK Controller interface.
+
+config SPI_TOPCLIFF_PCH
+ tristate "Intel EG20T PCH/LAPIS Semicon IOH(ML7213/ML7223/ML7831) SPI"
+ depends on PCI && (X86_32 || COMPILE_TEST)
+ help
+ SPI driver for the Topcliff PCH (Platform Controller Hub) SPI bus
+ used in some x86 embedded processors.
+
+ This driver also supports the ML7213/ML7223/ML7831, a companion chip
+ for the Atom E6xx series and compatible with the Intel EG20T PCH.
+
+config SPI_TXX9
+ tristate "Toshiba TXx9 SPI controller"
+ depends on GPIOLIB && (CPU_TX49XX || COMPILE_TEST)
+ help
+ SPI driver for Toshiba TXx9 MIPS SoCs
+
+config SPI_XCOMM
+ tristate "Analog Devices AD-FMCOMMS1-EBZ SPI-I2C-bridge driver"
+ depends on I2C
+ help
+ Support for the SPI-I2C bridge found on the Analog Devices
+ AD-FMCOMMS1-EBZ board.
+
+config SPI_XILINX
+ tristate "Xilinx SPI controller common module"
+ depends on HAS_IOMEM
+ select SPI_BITBANG
+ help
+ This exposes the SPI controller IP from the Xilinx EDK.
+
+ See the "OPB Serial Peripheral Interface (SPI) (v1.00e)"
+ Product Specification document (DS464) for hardware details.
+
+ Or for the DS570, see "XPS Serial Peripheral Interface (SPI) (v2.00b)"
+
+config SPI_XTENSA_XTFPGA
+ tristate "Xtensa SPI controller for xtfpga"
+ depends on (XTENSA && XTENSA_PLATFORM_XTFPGA) || COMPILE_TEST
+ select SPI_BITBANG
+ help
+ SPI driver for the xtfpga SPI master controller.
+
+ This simple SPI master controller is built into xtfpga bitstreams
+ and is used to control the daughterboard audio codec. It always
+ transfers 16-bit words in SPI mode 0, automatically asserting CS on
+ transfer start and deasserting it on end.
+
+config SPI_NUC900
+ tristate "Nuvoton NUC900 series SPI"
+ depends on ARCH_W90X900
+ select SPI_BITBANG
+ help
+ SPI driver for Nuvoton NUC900 series ARM SoCs
+
+#
+# Add new SPI master controllers in alphabetical order above this line
+#
+
+config SPI_DESIGNWARE
+ tristate "DesignWare SPI controller core support"
+ help
+ General driver for the SPI controller core from DesignWare.
+
+config SPI_DW_PCI
+ tristate "PCI interface driver for DW SPI core"
+ depends on SPI_DESIGNWARE && PCI
+
+config SPI_DW_MID_DMA
+ bool "DMA support for DW SPI controller on Intel MID platform"
+ depends on SPI_DW_PCI && DW_DMAC_PCI
+
+config SPI_DW_MMIO
+ tristate "Memory-mapped io interface driver for DW SPI core"
+ depends on SPI_DESIGNWARE
+
+#
+# There are lots of SPI device types, with sensors and memory
+# being probably the most widely used ones.
+#
+comment "SPI Protocol Masters"
+
+config SPI_SPIDEV
+ tristate "User mode SPI device driver support"
+ help
+ This supports user mode SPI protocol drivers.
+
+ Note that this application programming interface is EXPERIMENTAL
+ and hence SUBJECT TO CHANGE WITHOUT NOTICE while it stabilizes.
+
+config SPI_TLE62X0
+ tristate "Infineon TLE62X0 (for power switching)"
+ depends on SYSFS
+ help
+ SPI driver for Infineon TLE62X0 series line driver chips,
+ such as the TLE6220, TLE6230 and TLE6240. This provides a
+ sysfs interface, with each line presented as a kind of GPIO
+ exposing both switch control and diagnostic feedback.
+
+#
+# Add new SPI protocol masters in alphabetical order above this line
+#
+
+endif # SPI_MASTER
+
+# (slave support would go here)
+
+endif # SPI
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
new file mode 100644
index 000000000..d8cbf6549
--- /dev/null
+++ b/drivers/spi/Makefile
@@ -0,0 +1,91 @@
+#
+# Makefile for kernel SPI drivers.
+#
+
+ccflags-$(CONFIG_SPI_DEBUG) := -DDEBUG
+
+# small core, mostly translating board-specific
+# config declarations into driver model code
+obj-$(CONFIG_SPI_MASTER) += spi.o
+obj-$(CONFIG_SPI_SPIDEV) += spidev.o
+
+# SPI master controller drivers (bus)
+obj-$(CONFIG_SPI_ALTERA) += spi-altera.o
+obj-$(CONFIG_SPI_ATMEL) += spi-atmel.o
+obj-$(CONFIG_SPI_ATH79) += spi-ath79.o
+obj-$(CONFIG_SPI_AU1550) += spi-au1550.o
+obj-$(CONFIG_SPI_BCM2835) += spi-bcm2835.o
+obj-$(CONFIG_SPI_BCM53XX) += spi-bcm53xx.o
+obj-$(CONFIG_SPI_BCM63XX) += spi-bcm63xx.o
+obj-$(CONFIG_SPI_BCM63XX_HSSPI) += spi-bcm63xx-hsspi.o
+obj-$(CONFIG_SPI_BFIN5XX) += spi-bfin5xx.o
+obj-$(CONFIG_SPI_ADI_V3) += spi-adi-v3.o
+obj-$(CONFIG_SPI_BFIN_SPORT) += spi-bfin-sport.o
+obj-$(CONFIG_SPI_BITBANG) += spi-bitbang.o
+obj-$(CONFIG_SPI_BUTTERFLY) += spi-butterfly.o
+obj-$(CONFIG_SPI_CADENCE) += spi-cadence.o
+obj-$(CONFIG_SPI_CLPS711X) += spi-clps711x.o
+obj-$(CONFIG_SPI_COLDFIRE_QSPI) += spi-coldfire-qspi.o
+obj-$(CONFIG_SPI_DAVINCI) += spi-davinci.o
+obj-$(CONFIG_SPI_DLN2) += spi-dln2.o
+obj-$(CONFIG_SPI_DESIGNWARE) += spi-dw.o
+obj-$(CONFIG_SPI_DW_MMIO) += spi-dw-mmio.o
+obj-$(CONFIG_SPI_DW_PCI) += spi-dw-midpci.o
+spi-dw-midpci-objs := spi-dw-pci.o spi-dw-mid.o
+obj-$(CONFIG_SPI_EFM32) += spi-efm32.o
+obj-$(CONFIG_SPI_EP93XX) += spi-ep93xx.o
+obj-$(CONFIG_SPI_FALCON) += spi-falcon.o
+obj-$(CONFIG_SPI_FSL_CPM) += spi-fsl-cpm.o
+obj-$(CONFIG_SPI_FSL_DSPI) += spi-fsl-dspi.o
+obj-$(CONFIG_SPI_FSL_LIB) += spi-fsl-lib.o
+obj-$(CONFIG_SPI_FSL_ESPI) += spi-fsl-espi.o
+obj-$(CONFIG_SPI_FSL_SPI) += spi-fsl-spi.o
+obj-$(CONFIG_SPI_GPIO) += spi-gpio.o
+obj-$(CONFIG_SPI_IMG_SPFI) += spi-img-spfi.o
+obj-$(CONFIG_SPI_IMX) += spi-imx.o
+obj-$(CONFIG_SPI_LM70_LLP) += spi-lm70llp.o
+obj-$(CONFIG_SPI_MESON_SPIFC) += spi-meson-spifc.o
+obj-$(CONFIG_SPI_MPC512x_PSC) += spi-mpc512x-psc.o
+obj-$(CONFIG_SPI_MPC52xx_PSC) += spi-mpc52xx-psc.o
+obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
+obj-$(CONFIG_SPI_MXS) += spi-mxs.o
+obj-$(CONFIG_SPI_NUC900) += spi-nuc900.o
+obj-$(CONFIG_SPI_OC_TINY) += spi-oc-tiny.o
+obj-$(CONFIG_SPI_OCTEON) += spi-octeon.o
+obj-$(CONFIG_SPI_OMAP_UWIRE) += spi-omap-uwire.o
+obj-$(CONFIG_SPI_OMAP_100K) += spi-omap-100k.o
+obj-$(CONFIG_SPI_OMAP24XX) += spi-omap2-mcspi.o
+obj-$(CONFIG_SPI_TI_QSPI) += spi-ti-qspi.o
+obj-$(CONFIG_SPI_ORION) += spi-orion.o
+obj-$(CONFIG_SPI_PL022) += spi-pl022.o
+obj-$(CONFIG_SPI_PPC4xx) += spi-ppc4xx.o
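+# spi-pxa2xx-platform is a composite module: the SSP core plus whichever
+# DMA implementation (legacy PXA DMA or generic DMA engine) was selected
+# in Kconfig.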
+spi-pxa2xx-platform-objs := spi-pxa2xx.o
+spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_PXADMA) += spi-pxa2xx-pxadma.o
+spi-pxa2xx-platform-$(CONFIG_SPI_PXA2XX_DMA) += spi-pxa2xx-dma.o
+obj-$(CONFIG_SPI_PXA2XX) += spi-pxa2xx-platform.o
+obj-$(CONFIG_SPI_PXA2XX_PCI) += spi-pxa2xx-pci.o
+obj-$(CONFIG_SPI_QUP) += spi-qup.o
+obj-$(CONFIG_SPI_ROCKCHIP) += spi-rockchip.o
+obj-$(CONFIG_SPI_RSPI) += spi-rspi.o
+obj-$(CONFIG_SPI_S3C24XX) += spi-s3c24xx-hw.o
+spi-s3c24xx-hw-y := spi-s3c24xx.o
+spi-s3c24xx-hw-$(CONFIG_SPI_S3C24XX_FIQ) += spi-s3c24xx-fiq.o
+obj-$(CONFIG_SPI_S3C64XX) += spi-s3c64xx.o
+obj-$(CONFIG_SPI_SC18IS602) += spi-sc18is602.o
+obj-$(CONFIG_SPI_SH) += spi-sh.o
+obj-$(CONFIG_SPI_SH_HSPI) += spi-sh-hspi.o
+obj-$(CONFIG_SPI_SH_MSIOF) += spi-sh-msiof.o
+obj-$(CONFIG_SPI_SH_SCI) += spi-sh-sci.o
+obj-$(CONFIG_SPI_SIRF) += spi-sirf.o
+obj-$(CONFIG_SPI_ST_SSC4) += spi-st-ssc4.o
+obj-$(CONFIG_SPI_SUN4I) += spi-sun4i.o
+obj-$(CONFIG_SPI_SUN6I) += spi-sun6i.o
+obj-$(CONFIG_SPI_TEGRA114) += spi-tegra114.o
+obj-$(CONFIG_SPI_TEGRA20_SFLASH) += spi-tegra20-sflash.o
+obj-$(CONFIG_SPI_TEGRA20_SLINK) += spi-tegra20-slink.o
+obj-$(CONFIG_SPI_TLE62X0) += spi-tle62x0.o
+obj-$(CONFIG_SPI_TOPCLIFF_PCH) += spi-topcliff-pch.o
+obj-$(CONFIG_SPI_TXX9) += spi-txx9.o
+obj-$(CONFIG_SPI_XCOMM) += spi-xcomm.o
+obj-$(CONFIG_SPI_XILINX) += spi-xilinx.o
+obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o
diff --git a/drivers/spi/spi-adi-v3.c b/drivers/spi/spi-adi-v3.c
new file mode 100644
index 000000000..a16b25dcd
--- /dev/null
+++ b/drivers/spi/spi-adi-v3.c
@@ -0,0 +1,984 @@
+/*
+ * Analog Devices SPI3 controller driver
+ *
+ * Copyright (c) 2014 Analog Devices Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/errno.h>
+#include <linux/gpio.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/adi_spi3.h>
+#include <linux/types.h>
+
+#include <asm/dma.h>
+#include <asm/portmux.h>
+
+enum adi_spi_state {
+ START_STATE,
+ RUNNING_STATE,
+ DONE_STATE,
+ ERROR_STATE
+};
+
+struct adi_spi_master;
+
+struct adi_spi_transfer_ops {
+ void (*write) (struct adi_spi_master *);
+ void (*read) (struct adi_spi_master *);
+ void (*duplex) (struct adi_spi_master *);
+};
+
+/* runtime info for spi master */
+struct adi_spi_master {
+ /* SPI framework hookup */
+ struct spi_master *master;
+
+ /* Regs base of SPI controller */
+ struct adi_spi_regs __iomem *regs;
+
+ /* Pin request list */
+ u16 *pin_req;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct adi_spi_device *cur_chip;
+ unsigned transfer_len;
+
+ /* transfer buffer */
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+
+ /* dma info */
+ unsigned int tx_dma;
+ unsigned int rx_dma;
+ dma_addr_t tx_dma_addr;
+ dma_addr_t rx_dma_addr;
+ unsigned long dummy_buffer; /* used in unidirectional transfer */
+ unsigned long tx_dma_size;
+ unsigned long rx_dma_size;
+ int tx_num;
+ int rx_num;
+
+ /* store register value for suspend/resume */
+ u32 control;
+ u32 ssel;
+
+ unsigned long sclk;
+ enum adi_spi_state state;
+
+ const struct adi_spi_transfer_ops *ops;
+};
+
+struct adi_spi_device {
+ u32 control;
+ u32 clock;
+ u32 ssel;
+
+ u8 cs;
+ u16 cs_chg_udelay; /* Some devices require > 255usec delay */
+ u32 cs_gpio;
+ u32 tx_dummy_val; /* tx value for rx only transfer */
+ bool enable_dma;
+ const struct adi_spi_transfer_ops *ops;
+};
+
+static void adi_spi_enable(struct adi_spi_master *drv_data)
+{
+ u32 ctl;
+
+ ctl = ioread32(&drv_data->regs->control);
+ ctl |= SPI_CTL_EN;
+ iowrite32(ctl, &drv_data->regs->control);
+}
+
+static void adi_spi_disable(struct adi_spi_master *drv_data)
+{
+ u32 ctl;
+
+ ctl = ioread32(&drv_data->regs->control);
+ ctl &= ~SPI_CTL_EN;
+ iowrite32(ctl, &drv_data->regs->control);
+}
+
+/* Calculate the SPI_CLOCK register value based on input HZ */
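+/* e.g. sclk = 100 MHz and speed_hz = 25 MHz give 100/25 - 1 = 3; the
+ * controller presumably divides sclk by (register value + 1).
+ */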
+static u32 hz_to_spi_clock(u32 sclk, u32 speed_hz)
+{
+ u32 spi_clock = sclk / speed_hz;
+
+ if (spi_clock)
+ spi_clock--;
+ return spi_clock;
+}
+
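+/* Busy-wait until the controller reports SPIF (transfer finished), then
+ * clear the status bits. Returns the remaining loop budget, i.e. 0 on
+ * timeout, which callers treat as an error.
+ */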
+static int adi_spi_flush(struct adi_spi_master *drv_data)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ /* wait for stop and clear stat */
+ while (!(ioread32(&drv_data->regs->status) & SPI_STAT_SPIF) && --limit)
+ cpu_relax();
+
+ iowrite32(0xFFFFFFFF, &drv_data->regs->status);
+
+ return limit;
+}
+
+/* Chip select operation functions for cs_change flag */
+static void adi_spi_cs_active(struct adi_spi_master *drv_data, struct adi_spi_device *chip)
+{
+ if (likely(chip->cs < MAX_CTRL_CS)) {
+ u32 reg;
+ reg = ioread32(&drv_data->regs->ssel);
+ reg &= ~chip->ssel;
+ iowrite32(reg, &drv_data->regs->ssel);
+ } else {
+ gpio_set_value(chip->cs_gpio, 0);
+ }
+}
+
+static void adi_spi_cs_deactive(struct adi_spi_master *drv_data,
+ struct adi_spi_device *chip)
+{
+ if (likely(chip->cs < MAX_CTRL_CS)) {
+ u32 reg;
+ reg = ioread32(&drv_data->regs->ssel);
+ reg |= chip->ssel;
+ iowrite32(reg, &drv_data->regs->ssel);
+ } else {
+ gpio_set_value(chip->cs_gpio, 1);
+ }
+
+ /* Move delay here for consistency */
+ if (chip->cs_chg_udelay)
+ udelay(chip->cs_chg_udelay);
+}
+
+/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */
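+/* chip->ssel holds the select-value bit ((1 << cs) << 8, assigned in
+ * adi_spi_setup()); shifting it right by 8 yields the matching
+ * slave-select enable bit in the low byte of the SSEL register.
+ */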
+static inline void adi_spi_cs_enable(struct adi_spi_master *drv_data,
+ struct adi_spi_device *chip)
+{
+ if (chip->cs < MAX_CTRL_CS) {
+ u32 reg;
+ reg = ioread32(&drv_data->regs->ssel);
+ reg |= chip->ssel >> 8;
+ iowrite32(reg, &drv_data->regs->ssel);
+ }
+}
+
+static inline void adi_spi_cs_disable(struct adi_spi_master *drv_data,
+ struct adi_spi_device *chip)
+{
+ if (chip->cs < MAX_CTRL_CS) {
+ u32 reg;
+ reg = ioread32(&drv_data->regs->ssel);
+ reg &= ~(chip->ssel >> 8);
+ iowrite32(reg, &drv_data->regs->ssel);
+ }
+}
+
+/* stop controller and re-configure the current chip */
+static void adi_spi_restore_state(struct adi_spi_master *drv_data)
+{
+ struct adi_spi_device *chip = drv_data->cur_chip;
+
+ /* Clear status and disable clock */
+ iowrite32(0xFFFFFFFF, &drv_data->regs->status);
+ iowrite32(0x0, &drv_data->regs->rx_control);
+ iowrite32(0x0, &drv_data->regs->tx_control);
+ adi_spi_disable(drv_data);
+
+ /* Load the registers */
+ iowrite32(chip->control, &drv_data->regs->control);
+ iowrite32(chip->clock, &drv_data->regs->clock);
+
+ adi_spi_enable(drv_data);
+ drv_data->tx_num = drv_data->rx_num = 0;
+ /* we always choose tx to initiate the transfer */
+ iowrite32(SPI_RXCTL_REN, &drv_data->regs->rx_control);
+ iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI, &drv_data->regs->tx_control);
+ adi_spi_cs_active(drv_data, chip);
+}
+
+/* discard invalid rx data and empty rfifo */
+static inline void dummy_read(struct adi_spi_master *drv_data)
+{
+ while (!(ioread32(&drv_data->regs->status) & SPI_STAT_RFE))
+ ioread32(&drv_data->regs->rfifo);
+}
+
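+/* PIO write: push one word into the TX FIFO, then wait for the matching
+ * dummy word to arrive in the RX FIFO and drain it, keeping the FIFOs
+ * in lockstep.
+ */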
+static void adi_spi_u8_write(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->tx < drv_data->tx_end) {
+ iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo);
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ ioread32(&drv_data->regs->rfifo);
+ }
+}
+
+static void adi_spi_u8_read(struct adi_spi_master *drv_data)
+{
+ u32 tx_val = drv_data->cur_chip->tx_dummy_val;
+
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(tx_val, &drv_data->regs->tfifo);
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo);
+ }
+}
+
+static void adi_spi_u8_duplex(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(*(u8 *)(drv_data->tx++), &drv_data->regs->tfifo);
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u8 *)(drv_data->rx++) = ioread32(&drv_data->regs->rfifo);
+ }
+}
+
+static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u8 = {
+ .write = adi_spi_u8_write,
+ .read = adi_spi_u8_read,
+ .duplex = adi_spi_u8_duplex,
+};
+
+static void adi_spi_u16_write(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->tx < drv_data->tx_end) {
+ iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo);
+ drv_data->tx += 2;
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ ioread32(&drv_data->regs->rfifo);
+ }
+}
+
+static void adi_spi_u16_read(struct adi_spi_master *drv_data)
+{
+ u32 tx_val = drv_data->cur_chip->tx_dummy_val;
+
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(tx_val, &drv_data->regs->tfifo);
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
+ drv_data->rx += 2;
+ }
+}
+
+static void adi_spi_u16_duplex(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(*(u16 *)drv_data->tx, &drv_data->regs->tfifo);
+ drv_data->tx += 2;
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u16 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
+ drv_data->rx += 2;
+ }
+}
+
+static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u16 = {
+ .write = adi_spi_u16_write,
+ .read = adi_spi_u16_read,
+ .duplex = adi_spi_u16_duplex,
+};
+
+static void adi_spi_u32_write(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->tx < drv_data->tx_end) {
+ iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo);
+ drv_data->tx += 4;
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ ioread32(&drv_data->regs->rfifo);
+ }
+}
+
+static void adi_spi_u32_read(struct adi_spi_master *drv_data)
+{
+ u32 tx_val = drv_data->cur_chip->tx_dummy_val;
+
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(tx_val, &drv_data->regs->tfifo);
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
+ drv_data->rx += 4;
+ }
+}
+
+static void adi_spi_u32_duplex(struct adi_spi_master *drv_data)
+{
+ dummy_read(drv_data);
+ while (drv_data->rx < drv_data->rx_end) {
+ iowrite32(*(u32 *)drv_data->tx, &drv_data->regs->tfifo);
+ drv_data->tx += 4;
+ while (ioread32(&drv_data->regs->status) & SPI_STAT_RFE)
+ cpu_relax();
+ *(u32 *)drv_data->rx = ioread32(&drv_data->regs->rfifo);
+ drv_data->rx += 4;
+ }
+}
+
+static const struct adi_spi_transfer_ops adi_spi_transfer_ops_u32 = {
+ .write = adi_spi_u32_write,
+ .read = adi_spi_u32_read,
+ .duplex = adi_spi_u32_duplex,
+};
+
+
+/* test if there are more transfers to be done */
+static void adi_spi_next_transfer(struct adi_spi_master *drv)
+{
+ struct spi_message *msg = drv->cur_msg;
+ struct spi_transfer *t = drv->cur_transfer;
+
+ /* Move to next transfer */
+ if (t->transfer_list.next != &msg->transfers) {
+ drv->cur_transfer = list_entry(t->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ drv->state = RUNNING_STATE;
+ } else {
+ drv->state = DONE_STATE;
+ drv->cur_transfer = NULL;
+ }
+}
+
+static void adi_spi_giveback(struct adi_spi_master *drv_data)
+{
+ struct adi_spi_device *chip = drv_data->cur_chip;
+
+ adi_spi_cs_deactive(drv_data, chip);
+ spi_finalize_current_message(drv_data->master);
+}
+
+static int adi_spi_setup_transfer(struct adi_spi_master *drv)
+{
+ struct spi_transfer *t = drv->cur_transfer;
+ u32 cr, cr_width;
+
+ if (t->tx_buf) {
+ drv->tx = (void *)t->tx_buf;
+ drv->tx_end = drv->tx + t->len;
+ } else {
+ drv->tx = NULL;
+ }
+
+ if (t->rx_buf) {
+ drv->rx = t->rx_buf;
+ drv->rx_end = drv->rx + t->len;
+ } else {
+ drv->rx = NULL;
+ }
+
+ drv->transfer_len = t->len;
+
+ /* bits per word setup */
+ switch (t->bits_per_word) {
+ case 8:
+ cr_width = SPI_CTL_SIZE08;
+ drv->ops = &adi_spi_transfer_ops_u8;
+ break;
+ case 16:
+ cr_width = SPI_CTL_SIZE16;
+ drv->ops = &adi_spi_transfer_ops_u16;
+ break;
+ case 32:
+ cr_width = SPI_CTL_SIZE32;
+ drv->ops = &adi_spi_transfer_ops_u32;
+ break;
+ default:
+ return -EINVAL;
+ }
+ cr = ioread32(&drv->regs->control) & ~SPI_CTL_SIZE;
+ cr |= cr_width;
+ iowrite32(cr, &drv->regs->control);
+
+ /* speed setup */
+ iowrite32(hz_to_spi_clock(drv->sclk, t->speed_hz), &drv->regs->clock);
+ return 0;
+}
+
+static int adi_spi_dma_xfer(struct adi_spi_master *drv_data)
+{
+ struct spi_transfer *t = drv_data->cur_transfer;
+ struct spi_message *msg = drv_data->cur_msg;
+ struct adi_spi_device *chip = drv_data->cur_chip;
+ u32 dma_config;
+ unsigned long word_count, word_size;
+ void *tx_buf, *rx_buf;
+
+ switch (t->bits_per_word) {
+ case 8:
+ dma_config = WDSIZE_8 | PSIZE_8;
+ word_count = drv_data->transfer_len;
+ word_size = 1;
+ break;
+ case 16:
+ dma_config = WDSIZE_16 | PSIZE_16;
+ word_count = drv_data->transfer_len / 2;
+ word_size = 2;
+ break;
+ default:
+ dma_config = WDSIZE_32 | PSIZE_32;
+ word_count = drv_data->transfer_len / 4;
+ word_size = 4;
+ break;
+ }
+
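+ /*
+ * For a unidirectional transfer, route the unused direction through
+ * dummy_buffer with a zero address increment so that both DMA
+ * channels still run for the full word count.
+ */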
+ if (!drv_data->rx) {
+ tx_buf = drv_data->tx;
+ rx_buf = &drv_data->dummy_buffer;
+ drv_data->tx_dma_size = drv_data->transfer_len;
+ drv_data->rx_dma_size = sizeof(drv_data->dummy_buffer);
+ set_dma_x_modify(drv_data->tx_dma, word_size);
+ set_dma_x_modify(drv_data->rx_dma, 0);
+ } else if (!drv_data->tx) {
+ drv_data->dummy_buffer = chip->tx_dummy_val;
+ tx_buf = &drv_data->dummy_buffer;
+ rx_buf = drv_data->rx;
+ drv_data->tx_dma_size = sizeof(drv_data->dummy_buffer);
+ drv_data->rx_dma_size = drv_data->transfer_len;
+ set_dma_x_modify(drv_data->tx_dma, 0);
+ set_dma_x_modify(drv_data->rx_dma, word_size);
+ } else {
+ tx_buf = drv_data->tx;
+ rx_buf = drv_data->rx;
+ drv_data->tx_dma_size = drv_data->rx_dma_size
+ = drv_data->transfer_len;
+ set_dma_x_modify(drv_data->tx_dma, word_size);
+ set_dma_x_modify(drv_data->rx_dma, word_size);
+ }
+
+ drv_data->tx_dma_addr = dma_map_single(&msg->spi->dev,
+ (void *)tx_buf,
+ drv_data->tx_dma_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(&msg->spi->dev,
+ drv_data->tx_dma_addr))
+ return -ENOMEM;
+
+ drv_data->rx_dma_addr = dma_map_single(&msg->spi->dev,
+ (void *)rx_buf,
+ drv_data->rx_dma_size,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&msg->spi->dev,
+ drv_data->rx_dma_addr)) {
+ dma_unmap_single(&msg->spi->dev,
+ drv_data->tx_dma_addr,
+ drv_data->tx_dma_size,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+
+ dummy_read(drv_data);
+ set_dma_x_count(drv_data->tx_dma, word_count);
+ set_dma_x_count(drv_data->rx_dma, word_count);
+ set_dma_start_addr(drv_data->tx_dma, drv_data->tx_dma_addr);
+ set_dma_start_addr(drv_data->rx_dma, drv_data->rx_dma_addr);
+ dma_config |= DMAFLOW_STOP | RESTART | DI_EN;
+ set_dma_config(drv_data->tx_dma, dma_config);
+ set_dma_config(drv_data->rx_dma, dma_config | WNR);
+ enable_dma(drv_data->tx_dma);
+ enable_dma(drv_data->rx_dma);
+
+ iowrite32(SPI_RXCTL_REN | SPI_RXCTL_RDR_NE,
+ &drv_data->regs->rx_control);
+ iowrite32(SPI_TXCTL_TEN | SPI_TXCTL_TTI | SPI_TXCTL_TDR_NF,
+ &drv_data->regs->tx_control);
+
+ return 0;
+}
+
+static int adi_spi_pio_xfer(struct adi_spi_master *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+
+ if (!drv_data->rx) {
+ /* write only half duplex */
+ drv_data->ops->write(drv_data);
+ if (drv_data->tx != drv_data->tx_end)
+ return -EIO;
+ } else if (!drv_data->tx) {
+ /* read only half duplex */
+ drv_data->ops->read(drv_data);
+ if (drv_data->rx != drv_data->rx_end)
+ return -EIO;
+ } else {
+ /* full duplex mode */
+ drv_data->ops->duplex(drv_data);
+ if (drv_data->tx != drv_data->tx_end)
+ return -EIO;
+ }
+
+ if (!adi_spi_flush(drv_data))
+ return -EIO;
+ msg->actual_length += drv_data->transfer_len;
+ tasklet_schedule(&drv_data->pump_transfers);
+ return 0;
+}
+
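+/* Tasklet body: advances the current message one transfer at a time.
+ * It is (re)scheduled from transfer_one_message(), from the PIO path,
+ * and from the RX DMA completion handler.
+ */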
+static void adi_spi_pump_transfers(unsigned long data)
+{
+ struct adi_spi_master *drv_data = (struct adi_spi_master *)data;
+ struct spi_message *msg = NULL;
+ struct spi_transfer *t = NULL;
+ struct adi_spi_device *chip = NULL;
+ int ret;
+
+ /* Get current state information */
+ msg = drv_data->cur_msg;
+ t = drv_data->cur_transfer;
+ chip = drv_data->cur_chip;
+
+ /* Handle abort */
+ if (drv_data->state == ERROR_STATE) {
+ msg->status = -EIO;
+ adi_spi_giveback(drv_data);
+ return;
+ }
+
+ if (drv_data->state == RUNNING_STATE) {
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+ if (t->cs_change)
+ adi_spi_cs_deactive(drv_data, chip);
+ adi_spi_next_transfer(drv_data);
+ t = drv_data->cur_transfer;
+ }
+ /* Handle end of message */
+ if (drv_data->state == DONE_STATE) {
+ msg->status = 0;
+ adi_spi_giveback(drv_data);
+ return;
+ }
+
+ if ((t->len == 0) || (t->tx_buf == NULL && t->rx_buf == NULL)) {
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+ return;
+ }
+
+ ret = adi_spi_setup_transfer(drv_data);
+ if (ret) {
+ msg->status = ret;
+ adi_spi_giveback(drv_data);
+ return;
+ }
+
+ iowrite32(0xFFFFFFFF, &drv_data->regs->status);
+ adi_spi_cs_active(drv_data, chip);
+ drv_data->state = RUNNING_STATE;
+
+ if (chip->enable_dma)
+ ret = adi_spi_dma_xfer(drv_data);
+ else
+ ret = adi_spi_pio_xfer(drv_data);
+ if (ret) {
+ msg->status = ret;
+ adi_spi_giveback(drv_data);
+ }
+}
+
+static int adi_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct adi_spi_master *drv_data = spi_master_get_devdata(master);
+
+ drv_data->cur_msg = m;
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+ adi_spi_restore_state(drv_data);
+
+ drv_data->state = START_STATE;
+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+
+ tasklet_schedule(&drv_data->pump_transfers);
+ return 0;
+}
+
+#define MAX_SPI_SSEL 7
+
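+/* portmux request codes for the hardware slave selects, indexed by
+ * SPI bus number and (chip select - 1)
+ */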
+static const u16 ssel[][MAX_SPI_SSEL] = {
+ {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
+ P_SPI0_SSEL4, P_SPI0_SSEL5,
+ P_SPI0_SSEL6, P_SPI0_SSEL7},
+
+ {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3,
+ P_SPI1_SSEL4, P_SPI1_SSEL5,
+ P_SPI1_SSEL6, P_SPI1_SSEL7},
+
+ {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3,
+ P_SPI2_SSEL4, P_SPI2_SSEL5,
+ P_SPI2_SSEL6, P_SPI2_SSEL7},
+};
+
+static int adi_spi_setup(struct spi_device *spi)
+{
+ struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master);
+ struct adi_spi_device *chip = spi_get_ctldata(spi);
+ u32 ctl_reg = SPI_CTL_ODM | SPI_CTL_PSSE;
+ int ret = -EINVAL;
+
+ if (!chip) {
+ struct adi_spi3_chip *chip_info = spi->controller_data;
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ if (chip_info) {
+ if (chip_info->control & ~ctl_reg) {
+ dev_err(&spi->dev,
+ "do not set bits that the SPI framework manages\n");
+ goto error;
+ }
+ chip->control = chip_info->control;
+ chip->cs_chg_udelay = chip_info->cs_chg_udelay;
+ chip->tx_dummy_val = chip_info->tx_dummy_val;
+ chip->enable_dma = chip_info->enable_dma;
+ }
+ chip->cs = spi->chip_select;
+
+ if (chip->cs < MAX_CTRL_CS) {
+ chip->ssel = (1 << chip->cs) << 8;
+ ret = peripheral_request(ssel[spi->master->bus_num]
+ [chip->cs-1], dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "peripheral_request() error\n");
+ goto error;
+ }
+ } else {
+ chip->cs_gpio = chip->cs - MAX_CTRL_CS;
+ ret = gpio_request_one(chip->cs_gpio, GPIOF_OUT_INIT_HIGH,
+ dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "gpio_request_one() error\n");
+ goto error;
+ }
+ }
+ spi_set_ctldata(spi, chip);
+ }
+
+ /* force a default base state */
+ chip->control &= ctl_reg;
+
+ if (spi->mode & SPI_CPOL)
+ chip->control |= SPI_CTL_CPOL;
+ if (spi->mode & SPI_CPHA)
+ chip->control |= SPI_CTL_CPHA;
+ if (spi->mode & SPI_LSB_FIRST)
+ chip->control |= SPI_CTL_LSBF;
+ chip->control |= SPI_CTL_MSTR;
+ /* we choose software to control cs */
+ chip->control &= ~SPI_CTL_ASSEL;
+
+ chip->clock = hz_to_spi_clock(drv_data->sclk, spi->max_speed_hz);
+
+ adi_spi_cs_enable(drv_data, chip);
+ adi_spi_cs_deactive(drv_data, chip);
+
+ return 0;
+error:
+ if (chip) {
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+ }
+
+ return ret;
+}
+
+static void adi_spi_cleanup(struct spi_device *spi)
+{
+ struct adi_spi_device *chip = spi_get_ctldata(spi);
+ struct adi_spi_master *drv_data = spi_master_get_devdata(spi->master);
+
+ if (!chip)
+ return;
+
+ if (chip->cs < MAX_CTRL_CS) {
+ peripheral_free(ssel[spi->master->bus_num]
+ [chip->cs-1]);
+ adi_spi_cs_disable(drv_data, chip);
+ } else {
+ gpio_free(chip->cs_gpio);
+ }
+
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+}
+
+static irqreturn_t adi_spi_tx_dma_isr(int irq, void *dev_id)
+{
+ struct adi_spi_master *drv_data = dev_id;
+ u32 dma_stat = get_dma_curr_irqstat(drv_data->tx_dma);
+ u32 tx_ctl;
+
+ clear_dma_irqstat(drv_data->tx_dma);
+ if (dma_stat & DMA_DONE) {
+ drv_data->tx_num++;
+ } else {
+ dev_err(&drv_data->master->dev,
+ "spi tx dma error: %d\n", dma_stat);
+ if (drv_data->tx)
+ drv_data->state = ERROR_STATE;
+ }
+ tx_ctl = ioread32(&drv_data->regs->tx_control);
+ tx_ctl &= ~SPI_TXCTL_TDR_NF;
+ iowrite32(tx_ctl, &drv_data->regs->tx_control);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t adi_spi_rx_dma_isr(int irq, void *dev_id)
+{
+ struct adi_spi_master *drv_data = dev_id;
+ struct spi_message *msg = drv_data->cur_msg;
+ u32 dma_stat = get_dma_curr_irqstat(drv_data->rx_dma);
+
+ clear_dma_irqstat(drv_data->rx_dma);
+ if (dma_stat & DMA_DONE) {
+ drv_data->rx_num++;
+ /* we may fail on tx dma */
+ if (drv_data->state != ERROR_STATE)
+ msg->actual_length += drv_data->transfer_len;
+ } else {
+ drv_data->state = ERROR_STATE;
+ dev_err(&drv_data->master->dev,
+ "spi rx dma error: %d\n", dma_stat);
+ }
+ iowrite32(0, &drv_data->regs->tx_control);
+ iowrite32(0, &drv_data->regs->rx_control);
+ if (drv_data->rx_num != drv_data->tx_num)
+ dev_dbg(&drv_data->master->dev,
+ "dma interrupt missing: tx=%d,rx=%d\n",
+ drv_data->tx_num, drv_data->rx_num);
+ tasklet_schedule(&drv_data->pump_transfers);
+ return IRQ_HANDLED;
+}
+
+static int adi_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct adi_spi3_master *info = dev_get_platdata(dev);
+ struct spi_master *master;
+ struct adi_spi_master *drv_data;
+ struct resource *mem, *res;
+ unsigned int tx_dma, rx_dma;
+ struct clk *sclk;
+ int ret;
+
+ if (!info) {
+ dev_err(dev, "platform data missing!\n");
+ return -ENODEV;
+ }
+
+ sclk = devm_clk_get(dev, "spi");
+ if (IS_ERR(sclk)) {
+ dev_err(dev, "can not get spi clock\n");
+ return PTR_ERR(sclk);
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_err(dev, "can not get tx dma resource\n");
+ return -ENXIO;
+ }
+ tx_dma = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_err(dev, "can not get rx dma resource\n");
+ return -ENXIO;
+ }
+ rx_dma = res->start;
+
+ /* allocate master with space for drv_data */
+ master = spi_alloc_master(dev, sizeof(*drv_data));
+ if (!master) {
+ dev_err(dev, "can not alloc spi_master\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+
+ /* the mode bits supported by this driver */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = info->num_chipselect;
+ master->cleanup = adi_spi_cleanup;
+ master->setup = adi_spi_setup;
+ master->transfer_one_message = adi_spi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
+
+ drv_data = spi_master_get_devdata(master);
+ drv_data->master = master;
+ drv_data->tx_dma = tx_dma;
+ drv_data->rx_dma = rx_dma;
+ drv_data->pin_req = info->pin_req;
+ drv_data->sclk = clk_get_rate(sclk);
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ drv_data->regs = devm_ioremap_resource(dev, mem);
+ if (IS_ERR(drv_data->regs)) {
+ ret = PTR_ERR(drv_data->regs);
+ goto err_put_master;
+ }
+
+ /* request tx and rx dma */
+ ret = request_dma(tx_dma, "SPI_TX_DMA");
+ if (ret) {
+ dev_err(dev, "can not request SPI TX DMA channel\n");
+ goto err_put_master;
+ }
+ set_dma_callback(tx_dma, adi_spi_tx_dma_isr, drv_data);
+
+ ret = request_dma(rx_dma, "SPI_RX_DMA");
+ if (ret) {
+ dev_err(dev, "can not request SPI RX DMA channel\n");
+ goto err_free_tx_dma;
+ }
+ set_dma_callback(drv_data->rx_dma, adi_spi_rx_dma_isr, drv_data);
+
+ /* request CLK, MOSI and MISO */
+ ret = peripheral_request_list(drv_data->pin_req, "adi-spi3");
+ if (ret < 0) {
+ dev_err(dev, "can not request spi pins\n");
+ goto err_free_rx_dma;
+ }
+
+ iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control);
+ iowrite32(0x0000FE00, &drv_data->regs->ssel);
+ iowrite32(0x0, &drv_data->regs->delay);
+
+ tasklet_init(&drv_data->pump_transfers,
+ adi_spi_pump_transfers, (unsigned long)drv_data);
+ /* register with the SPI framework */
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(dev, "can not register spi master\n");
+ goto err_free_peripheral;
+ }
+
+ return ret;
+
+err_free_peripheral:
+ peripheral_free_list(drv_data->pin_req);
+err_free_rx_dma:
+ free_dma(rx_dma);
+err_free_tx_dma:
+ free_dma(tx_dma);
+err_put_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int adi_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct adi_spi_master *drv_data = spi_master_get_devdata(master);
+
+ adi_spi_disable(drv_data);
+ peripheral_free_list(drv_data->pin_req);
+ free_dma(drv_data->rx_dma);
+ free_dma(drv_data->tx_dma);
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int adi_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct adi_spi_master *drv_data = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+
+ drv_data->control = ioread32(&drv_data->regs->control);
+ drv_data->ssel = ioread32(&drv_data->regs->ssel);
+
+ iowrite32(SPI_CTL_MSTR | SPI_CTL_CPHA, &drv_data->regs->control);
+ iowrite32(0x0000FE00, &drv_data->regs->ssel);
+ dma_disable_irq(drv_data->rx_dma);
+ dma_disable_irq(drv_data->tx_dma);
+
+ return 0;
+}
+
+static int adi_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct adi_spi_master *drv_data = spi_master_get_devdata(master);
+ int ret = 0;
+
+ /* the boot ROM may modify SPI and DMA state when resuming in SPI boot mode */
+ disable_dma(drv_data->rx_dma);
+
+ dma_enable_irq(drv_data->rx_dma);
+ dma_enable_irq(drv_data->tx_dma);
+ iowrite32(drv_data->control, &drv_data->regs->control);
+ iowrite32(drv_data->ssel, &drv_data->regs->ssel);
+
+ ret = spi_master_resume(master);
+ if (ret) {
+ free_dma(drv_data->rx_dma);
+ free_dma(drv_data->tx_dma);
+ }
+
+ return ret;
+}
+#endif
+static const struct dev_pm_ops adi_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(adi_spi_suspend, adi_spi_resume)
+};
+
+MODULE_ALIAS("platform:adi-spi3");
+static struct platform_driver adi_spi_driver = {
+ .driver = {
+ .name = "adi-spi3",
+ .pm = &adi_spi_pm_ops,
+ },
+ .remove = adi_spi_remove,
+};
+
+module_platform_driver_probe(adi_spi_driver, adi_spi_probe);
+
+MODULE_DESCRIPTION("Analog Devices SPI3 controller driver");
+MODULE_AUTHOR("Scott Jiang <Scott.Jiang.Linux@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-altera.c b/drivers/spi/spi-altera.c
new file mode 100644
index 000000000..b95010e72
--- /dev/null
+++ b/drivers/spi/spi-altera.c
@@ -0,0 +1,294 @@
+/*
+ * Altera SPI driver
+ *
+ * Copyright (C) 2008 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * Based on spi_s3c24xx.c, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#define DRV_NAME "spi_altera"
+
+#define ALTERA_SPI_RXDATA 0
+#define ALTERA_SPI_TXDATA 4
+#define ALTERA_SPI_STATUS 8
+#define ALTERA_SPI_CONTROL 12
+#define ALTERA_SPI_SLAVE_SEL 20
+
+#define ALTERA_SPI_STATUS_ROE_MSK 0x8
+#define ALTERA_SPI_STATUS_TOE_MSK 0x10
+#define ALTERA_SPI_STATUS_TMT_MSK 0x20
+#define ALTERA_SPI_STATUS_TRDY_MSK 0x40
+#define ALTERA_SPI_STATUS_RRDY_MSK 0x80
+#define ALTERA_SPI_STATUS_E_MSK 0x100
+
+#define ALTERA_SPI_CONTROL_IROE_MSK 0x8
+#define ALTERA_SPI_CONTROL_ITOE_MSK 0x10
+#define ALTERA_SPI_CONTROL_ITRDY_MSK 0x40
+#define ALTERA_SPI_CONTROL_IRRDY_MSK 0x80
+#define ALTERA_SPI_CONTROL_IE_MSK 0x100
+#define ALTERA_SPI_CONTROL_SSO_MSK 0x400
+
+struct altera_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *base;
+ int irq;
+ int len;
+ int count;
+ int bytes_per_word;
+ unsigned long imr;
+
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+};
+
+static inline struct altera_spi *altera_spi_to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static void altera_spi_chipsel(struct spi_device *spi, int value)
+{
+ struct altera_spi *hw = altera_spi_to_hw(spi);
+
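+ /* SSO forces the (active-low) SS_n outputs selected in SLAVE_SEL to
+ * assert; for SPI_CS_HIGH devices the sense is inverted, which is
+ * why the two branches below mirror each other
+ */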
+ if (spi->mode & SPI_CS_HIGH) {
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ writel(1 << spi->chip_select,
+ hw->base + ALTERA_SPI_SLAVE_SEL);
+ hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ writel(0, hw->base + ALTERA_SPI_SLAVE_SEL);
+ break;
+ }
+ } else {
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ hw->imr &= ~ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ writel(1 << spi->chip_select,
+ hw->base + ALTERA_SPI_SLAVE_SEL);
+ hw->imr |= ALTERA_SPI_CONTROL_SSO_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ break;
+ }
+ }
+}
+
+static inline unsigned int hw_txbyte(struct altera_spi *hw, int count)
+{
+ if (hw->tx) {
+ switch (hw->bytes_per_word) {
+ case 1:
+ return hw->tx[count];
+ case 2:
+ return (hw->tx[count * 2]
+ | (hw->tx[count * 2 + 1] << 8));
+ }
+ }
+ return 0;
+}
+
+static int altera_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct altera_spi *hw = altera_spi_to_hw(spi);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->count = 0;
+ hw->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
+ hw->len = t->len / hw->bytes_per_word;
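+ /* hw->len and hw->count are in words; the return value below
+ * converts the word count back to bytes
+ */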
+
+ if (hw->irq >= 0) {
+ /* enable receive interrupt */
+ hw->imr |= ALTERA_SPI_CONTROL_IRRDY_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+
+ /* send the first byte */
+ writel(hw_txbyte(hw, 0), hw->base + ALTERA_SPI_TXDATA);
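+ /* the IRQ handler keeps the transfer going from here: each RRDY
+ * interrupt stores one received word and queues the next TX word
+ * until hw->len words have been exchanged
+ */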
+
+ wait_for_completion(&hw->done);
+ /* disable receive interrupt */
+ hw->imr &= ~ALTERA_SPI_CONTROL_IRRDY_MSK;
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ } else {
+ while (hw->count < hw->len) {
+ unsigned int rxd;
+
+ writel(hw_txbyte(hw, hw->count),
+ hw->base + ALTERA_SPI_TXDATA);
+
+ while (!(readl(hw->base + ALTERA_SPI_STATUS) &
+ ALTERA_SPI_STATUS_RRDY_MSK))
+ cpu_relax();
+
+ rxd = readl(hw->base + ALTERA_SPI_RXDATA);
+ if (hw->rx) {
+ switch (hw->bytes_per_word) {
+ case 1:
+ hw->rx[hw->count] = rxd;
+ break;
+ case 2:
+ hw->rx[hw->count * 2] = rxd;
+ hw->rx[hw->count * 2 + 1] = rxd >> 8;
+ break;
+ }
+ }
+
+ hw->count++;
+ }
+ }
+
+ return hw->count * hw->bytes_per_word;
+}
+
+static irqreturn_t altera_spi_irq(int irq, void *dev)
+{
+ struct altera_spi *hw = dev;
+ unsigned int rxd;
+
+ rxd = readl(hw->base + ALTERA_SPI_RXDATA);
+ if (hw->rx) {
+ switch (hw->bytes_per_word) {
+ case 1:
+ hw->rx[hw->count] = rxd;
+ break;
+ case 2:
+ hw->rx[hw->count * 2] = rxd;
+ hw->rx[hw->count * 2 + 1] = rxd >> 8;
+ break;
+ }
+ }
+
+ hw->count++;
+
+ if (hw->count < hw->len)
+ writel(hw_txbyte(hw, hw->count), hw->base + ALTERA_SPI_TXDATA);
+ else
+ complete(&hw->done);
+
+ return IRQ_HANDLED;
+}
+
+static int altera_spi_probe(struct platform_device *pdev)
+{
+ struct altera_spi *hw;
+ struct spi_master *master;
+ struct resource *res;
+ int err = -ENODEV;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct altera_spi));
+ if (!master)
+ return err;
+
+ /* setup the master state. */
+ master->bus_num = pdev->id;
+ master->num_chipselect = 16;
+ master->mode_bits = SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->dev.of_node = pdev->dev.of_node;
+
+ hw = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, hw);
+
+ /* setup the state for the bitbang driver */
+ hw->bitbang.master = master;
+ hw->bitbang.chipselect = altera_spi_chipsel;
+ hw->bitbang.txrx_bufs = altera_spi_txrx;
+
+ /* find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base)) {
+ err = PTR_ERR(hw->base);
+ goto exit;
+ }
+ /* program defaults into the registers */
+ hw->imr = 0; /* disable spi interrupts */
+ writel(hw->imr, hw->base + ALTERA_SPI_CONTROL);
+ writel(0, hw->base + ALTERA_SPI_STATUS); /* clear status reg */
+ if (readl(hw->base + ALTERA_SPI_STATUS) & ALTERA_SPI_STATUS_RRDY_MSK)
+ readl(hw->base + ALTERA_SPI_RXDATA); /* flush rxdata */
+ /* irq is optional */
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq >= 0) {
+ init_completion(&hw->done);
+ err = devm_request_irq(&pdev->dev, hw->irq, altera_spi_irq, 0,
+ pdev->name, hw);
+ if (err)
+ goto exit;
+ }
+
+ /* register our spi controller */
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err)
+ goto exit;
+ dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);
+
+ return 0;
+exit:
+ spi_master_put(master);
+ return err;
+}
+
+static int altera_spi_remove(struct platform_device *dev)
+{
+ struct altera_spi *hw = platform_get_drvdata(dev);
+ struct spi_master *master = hw->bitbang.master;
+
+ spi_bitbang_stop(&hw->bitbang);
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id altera_spi_match[] = {
+ { .compatible = "ALTR,spi-1.0", },
+ { .compatible = "altr,spi-1.0", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, altera_spi_match);
+#endif /* CONFIG_OF */
+
+static struct platform_driver altera_spi_driver = {
+ .probe = altera_spi_probe,
+ .remove = altera_spi_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = NULL,
+ .of_match_table = of_match_ptr(altera_spi_match),
+ },
+};
+module_platform_driver(altera_spi_driver);
+
+MODULE_DESCRIPTION("Altera SPI driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi-ath79.c b/drivers/spi/spi-ath79.c
new file mode 100644
index 000000000..b02eb4ac0
--- /dev/null
+++ b/drivers/spi/spi-ath79.c
@@ -0,0 +1,317 @@
+/*
+ * SPI controller driver for the Atheros AR71XX/AR724X/AR913X SoCs
+ *
+ * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ *
+ * This driver is based on spi-gpio.c:
+ * Copyright (C) 2006,2008 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+
+#include <asm/mach-ath79/ar71xx_regs.h>
+#include <asm/mach-ath79/ath79_spi_platform.h>
+
+#define DRV_NAME "ath79-spi"
+
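+/* sp->rrw_delay = ATH79_SPI_RRW_DELAY_FACTOR / ahb_rate_in_MHz
+ * approximates, in nanoseconds, the time one IOC register access
+ * already takes, so ath79_spi_delay() only waits for the remainder.
+ */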
+#define ATH79_SPI_RRW_DELAY_FACTOR 12000
+#define MHZ (1000 * 1000)
+
+struct ath79_spi {
+ struct spi_bitbang bitbang;
+ u32 ioc_base;
+ u32 reg_ctrl;
+ void __iomem *base;
+ struct clk *clk;
+ unsigned rrw_delay;
+};
+
+static inline u32 ath79_spi_rr(struct ath79_spi *sp, unsigned reg)
+{
+ return ioread32(sp->base + reg);
+}
+
+static inline void ath79_spi_wr(struct ath79_spi *sp, unsigned reg, u32 val)
+{
+ iowrite32(val, sp->base + reg);
+}
+
+static inline struct ath79_spi *ath79_spidev_to_sp(struct spi_device *spi)
+{
+ return spi_master_get_devdata(spi->master);
+}
+
+static inline void ath79_spi_delay(struct ath79_spi *sp, unsigned nsecs)
+{
+ if (nsecs > sp->rrw_delay)
+ ndelay(nsecs - sp->rrw_delay);
+}
+
+static void ath79_spi_chipselect(struct spi_device *spi, int is_active)
+{
+ struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+ int cs_high = (spi->mode & SPI_CS_HIGH) ? is_active : !is_active;
+
+ if (is_active) {
+ /* set initial clock polarity */
+ if (spi->mode & SPI_CPOL)
+ sp->ioc_base |= AR71XX_SPI_IOC_CLK;
+ else
+ sp->ioc_base &= ~AR71XX_SPI_IOC_CLK;
+
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+ }
+
+ if (spi->chip_select) {
+ struct ath79_spi_controller_data *cdata = spi->controller_data;
+
+ /* SPI is normally active-low */
+ gpio_set_value(cdata->gpio, cs_high);
+ } else {
+ if (cs_high)
+ sp->ioc_base |= AR71XX_SPI_IOC_CS0;
+ else
+ sp->ioc_base &= ~AR71XX_SPI_IOC_CS0;
+
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, sp->ioc_base);
+ }
+}
+
+static void ath79_spi_enable(struct ath79_spi *sp)
+{
+ /* enable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, AR71XX_SPI_FS_GPIO);
+
+ /* save CTRL register */
+ sp->reg_ctrl = ath79_spi_rr(sp, AR71XX_SPI_REG_CTRL);
+ sp->ioc_base = ath79_spi_rr(sp, AR71XX_SPI_REG_IOC);
+
+ /* TODO: setup speed? */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, 0x43);
+}
+
+static void ath79_spi_disable(struct ath79_spi *sp)
+{
+ /* restore CTRL register */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_CTRL, sp->reg_ctrl);
+ /* disable GPIO mode */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_FS, 0);
+}
+
+static int ath79_spi_setup_cs(struct spi_device *spi)
+{
+ struct ath79_spi_controller_data *cdata;
+ int status;
+
+ cdata = spi->controller_data;
+ if (spi->chip_select && !cdata)
+ return -EINVAL;
+
+ status = 0;
+ if (spi->chip_select) {
+ unsigned long flags;
+
+ flags = GPIOF_DIR_OUT;
+ if (spi->mode & SPI_CS_HIGH)
+ flags |= GPIOF_INIT_LOW;
+ else
+ flags |= GPIOF_INIT_HIGH;
+
+ status = gpio_request_one(cdata->gpio, flags,
+ dev_name(&spi->dev));
+ }
+
+ return status;
+}
+
+static void ath79_spi_cleanup_cs(struct spi_device *spi)
+{
+ if (spi->chip_select) {
+ struct ath79_spi_controller_data *cdata = spi->controller_data;
+
+ gpio_free(cdata->gpio);
+ }
+}
+
+static int ath79_spi_setup(struct spi_device *spi)
+{
+ int status = 0;
+
+ if (!spi->controller_state) {
+ status = ath79_spi_setup_cs(spi);
+ if (status)
+ return status;
+ }
+
+ status = spi_bitbang_setup(spi);
+ if (status && !spi->controller_state)
+ ath79_spi_cleanup_cs(spi);
+
+ return status;
+}
+
+static void ath79_spi_cleanup(struct spi_device *spi)
+{
+ ath79_spi_cleanup_cs(spi);
+ spi_bitbang_cleanup(spi);
+}
+
+static u32 ath79_spi_txrx_mode0(struct spi_device *spi, unsigned nsecs,
+ u32 word, u8 bits)
+{
+ struct ath79_spi *sp = ath79_spidev_to_sp(spi);
+ u32 ioc = sp->ioc_base;
+
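+ /* Shift the word so its MSB sits in bit 31, then bitbang each bit
+ * out on DO around a software-generated CLK pulse; the controller
+ * samples DI on the clock edges and accumulates the input in the
+ * read-data shift register (RDS), which is read back below.
+ */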
+ /* clock starts at inactive polarity */
+ for (word <<= (32 - bits); likely(bits); bits--) {
+ u32 out;
+
+ if (word & BIT(31))
+ out = ioc | AR71XX_SPI_IOC_DO;
+ else
+ out = ioc & ~AR71XX_SPI_IOC_DO;
+
+ /* setup MSB (to slave) on trailing edge */
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
+ ath79_spi_delay(sp, nsecs);
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out | AR71XX_SPI_IOC_CLK);
+ ath79_spi_delay(sp, nsecs);
+ if (bits == 1)
+ ath79_spi_wr(sp, AR71XX_SPI_REG_IOC, out);
+
+ word <<= 1;
+ }
+
+ return ath79_spi_rr(sp, AR71XX_SPI_REG_RDS);
+}
+
+static int ath79_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct ath79_spi *sp;
+ struct ath79_spi_platform_data *pdata;
+ struct resource *r;
+ unsigned long rate;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*sp));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "failed to allocate spi master\n");
+ return -ENOMEM;
+ }
+
+ sp = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, sp);
+
+ pdata = dev_get_platdata(&pdev->dev);
+
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ master->setup = ath79_spi_setup;
+ master->cleanup = ath79_spi_cleanup;
+ if (pdata) {
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->num_chipselect;
+ }
+
+ sp->bitbang.master = master;
+ sp->bitbang.chipselect = ath79_spi_chipselect;
+ sp->bitbang.txrx_word[SPI_MODE_0] = ath79_spi_txrx_mode0;
+ sp->bitbang.setup_transfer = spi_bitbang_setup_transfer;
+ sp->bitbang.flags = SPI_CS_HIGH;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENOENT;
+ goto err_put_master;
+ }
+
+ sp->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
+ if (!sp->base) {
+ ret = -ENXIO;
+ goto err_put_master;
+ }
+
+ sp->clk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sp->clk)) {
+ ret = PTR_ERR(sp->clk);
+ goto err_put_master;
+ }
+
+ ret = clk_enable(sp->clk);
+ if (ret)
+ goto err_put_master;
+
+ rate = DIV_ROUND_UP(clk_get_rate(sp->clk), MHZ);
+ if (!rate) {
+ ret = -EINVAL;
+ goto err_clk_disable;
+ }
+
+ sp->rrw_delay = ATH79_SPI_RRW_DELAY_FACTOR / rate;
+ dev_dbg(&pdev->dev, "register read/write delay is %u nsecs\n",
+ sp->rrw_delay);
+
+ ath79_spi_enable(sp);
+ ret = spi_bitbang_start(&sp->bitbang);
+ if (ret)
+ goto err_disable;
+
+ return 0;
+
+err_disable:
+ ath79_spi_disable(sp);
+err_clk_disable:
+ clk_disable(sp->clk);
+err_put_master:
+ spi_master_put(sp->bitbang.master);
+
+ return ret;
+}
+
+static int ath79_spi_remove(struct platform_device *pdev)
+{
+ struct ath79_spi *sp = platform_get_drvdata(pdev);
+
+ spi_bitbang_stop(&sp->bitbang);
+ ath79_spi_disable(sp);
+ clk_disable(sp->clk);
+ spi_master_put(sp->bitbang.master);
+
+ return 0;
+}
+
+static void ath79_spi_shutdown(struct platform_device *pdev)
+{
+ ath79_spi_remove(pdev);
+}
+
+static struct platform_driver ath79_spi_driver = {
+ .probe = ath79_spi_probe,
+ .remove = ath79_spi_remove,
+ .shutdown = ath79_spi_shutdown,
+ .driver = {
+ .name = DRV_NAME,
+ },
+};
+module_platform_driver(ath79_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Atheros AR71XX/AR724X/AR913X");
+MODULE_AUTHOR("Gabor Juhos <juhosg@openwrt.org>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
new file mode 100644
index 000000000..a2f40b1b2
--- /dev/null
+++ b/drivers/spi/spi-atmel.c
@@ -0,0 +1,1540 @@
+/*
+ * Driver for Atmel AT32 and AT91 SPI Controllers
+ *
+ * Copyright (C) 2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/slab.h>
+#include <linux/platform_data/atmel.h>
+#include <linux/platform_data/dma-atmel.h>
+#include <linux/of.h>
+
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/pm_runtime.h>
+
+/* SPI register offsets */
+#define SPI_CR 0x0000
+#define SPI_MR 0x0004
+#define SPI_RDR 0x0008
+#define SPI_TDR 0x000c
+#define SPI_SR 0x0010
+#define SPI_IER 0x0014
+#define SPI_IDR 0x0018
+#define SPI_IMR 0x001c
+#define SPI_CSR0 0x0030
+#define SPI_CSR1 0x0034
+#define SPI_CSR2 0x0038
+#define SPI_CSR3 0x003c
+#define SPI_VERSION 0x00fc
+#define SPI_RPR 0x0100
+#define SPI_RCR 0x0104
+#define SPI_TPR 0x0108
+#define SPI_TCR 0x010c
+#define SPI_RNPR 0x0110
+#define SPI_RNCR 0x0114
+#define SPI_TNPR 0x0118
+#define SPI_TNCR 0x011c
+#define SPI_PTCR 0x0120
+#define SPI_PTSR 0x0124
+
+/* Bitfields in CR */
+#define SPI_SPIEN_OFFSET 0
+#define SPI_SPIEN_SIZE 1
+#define SPI_SPIDIS_OFFSET 1
+#define SPI_SPIDIS_SIZE 1
+#define SPI_SWRST_OFFSET 7
+#define SPI_SWRST_SIZE 1
+#define SPI_LASTXFER_OFFSET 24
+#define SPI_LASTXFER_SIZE 1
+
+/* Bitfields in MR */
+#define SPI_MSTR_OFFSET 0
+#define SPI_MSTR_SIZE 1
+#define SPI_PS_OFFSET 1
+#define SPI_PS_SIZE 1
+#define SPI_PCSDEC_OFFSET 2
+#define SPI_PCSDEC_SIZE 1
+#define SPI_FDIV_OFFSET 3
+#define SPI_FDIV_SIZE 1
+#define SPI_MODFDIS_OFFSET 4
+#define SPI_MODFDIS_SIZE 1
+#define SPI_WDRBT_OFFSET 5
+#define SPI_WDRBT_SIZE 1
+#define SPI_LLB_OFFSET 7
+#define SPI_LLB_SIZE 1
+#define SPI_PCS_OFFSET 16
+#define SPI_PCS_SIZE 4
+#define SPI_DLYBCS_OFFSET 24
+#define SPI_DLYBCS_SIZE 8
+
+/* Bitfields in RDR */
+#define SPI_RD_OFFSET 0
+#define SPI_RD_SIZE 16
+
+/* Bitfields in TDR */
+#define SPI_TD_OFFSET 0
+#define SPI_TD_SIZE 16
+
+/* Bitfields in SR */
+#define SPI_RDRF_OFFSET 0
+#define SPI_RDRF_SIZE 1
+#define SPI_TDRE_OFFSET 1
+#define SPI_TDRE_SIZE 1
+#define SPI_MODF_OFFSET 2
+#define SPI_MODF_SIZE 1
+#define SPI_OVRES_OFFSET 3
+#define SPI_OVRES_SIZE 1
+#define SPI_ENDRX_OFFSET 4
+#define SPI_ENDRX_SIZE 1
+#define SPI_ENDTX_OFFSET 5
+#define SPI_ENDTX_SIZE 1
+#define SPI_RXBUFF_OFFSET 6
+#define SPI_RXBUFF_SIZE 1
+#define SPI_TXBUFE_OFFSET 7
+#define SPI_TXBUFE_SIZE 1
+#define SPI_NSSR_OFFSET 8
+#define SPI_NSSR_SIZE 1
+#define SPI_TXEMPTY_OFFSET 9
+#define SPI_TXEMPTY_SIZE 1
+#define SPI_SPIENS_OFFSET 16
+#define SPI_SPIENS_SIZE 1
+
+/* Bitfields in CSR0 */
+#define SPI_CPOL_OFFSET 0
+#define SPI_CPOL_SIZE 1
+#define SPI_NCPHA_OFFSET 1
+#define SPI_NCPHA_SIZE 1
+#define SPI_CSAAT_OFFSET 3
+#define SPI_CSAAT_SIZE 1
+#define SPI_BITS_OFFSET 4
+#define SPI_BITS_SIZE 4
+#define SPI_SCBR_OFFSET 8
+#define SPI_SCBR_SIZE 8
+#define SPI_DLYBS_OFFSET 16
+#define SPI_DLYBS_SIZE 8
+#define SPI_DLYBCT_OFFSET 24
+#define SPI_DLYBCT_SIZE 8
+
+/* Bitfields in RCR */
+#define SPI_RXCTR_OFFSET 0
+#define SPI_RXCTR_SIZE 16
+
+/* Bitfields in TCR */
+#define SPI_TXCTR_OFFSET 0
+#define SPI_TXCTR_SIZE 16
+
+/* Bitfields in RNCR */
+#define SPI_RXNCR_OFFSET 0
+#define SPI_RXNCR_SIZE 16
+
+/* Bitfields in TNCR */
+#define SPI_TXNCR_OFFSET 0
+#define SPI_TXNCR_SIZE 16
+
+/* Bitfields in PTCR */
+#define SPI_RXTEN_OFFSET 0
+#define SPI_RXTEN_SIZE 1
+#define SPI_RXTDIS_OFFSET 1
+#define SPI_RXTDIS_SIZE 1
+#define SPI_TXTEN_OFFSET 8
+#define SPI_TXTEN_SIZE 1
+#define SPI_TXTDIS_OFFSET 9
+#define SPI_TXTDIS_SIZE 1
+
+/* Constants for BITS */
+#define SPI_BITS_8_BPT 0
+#define SPI_BITS_9_BPT 1
+#define SPI_BITS_10_BPT 2
+#define SPI_BITS_11_BPT 3
+#define SPI_BITS_12_BPT 4
+#define SPI_BITS_13_BPT 5
+#define SPI_BITS_14_BPT 6
+#define SPI_BITS_15_BPT 7
+#define SPI_BITS_16_BPT 8
+
+/* Bit manipulation macros */
+#define SPI_BIT(name) \
+ (1 << SPI_##name##_OFFSET)
+#define SPI_BF(name, value) \
+ (((value) & ((1 << SPI_##name##_SIZE) - 1)) << SPI_##name##_OFFSET)
+#define SPI_BFEXT(name, value) \
+ (((value) >> SPI_##name##_OFFSET) & ((1 << SPI_##name##_SIZE) - 1))
+#define SPI_BFINS(name, value, old) \
+ (((old) & ~(((1 << SPI_##name##_SIZE) - 1) << SPI_##name##_OFFSET)) \
+ | SPI_BF(name, value))
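+/* e.g. SPI_BF(PCS, 0xe) places 0xe into MR[19:16], while
+ * SPI_BFINS(SCBR, scbr, csr) rewrites only the SCBR field of csr
+ */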
+
+/* Register access macros */
+#ifdef CONFIG_AVR32
+#define spi_readl(port, reg) \
+ __raw_readl((port)->regs + SPI_##reg)
+#define spi_writel(port, reg, value) \
+ __raw_writel((value), (port)->regs + SPI_##reg)
+#else
+#define spi_readl(port, reg) \
+ readl_relaxed((port)->regs + SPI_##reg)
+#define spi_writel(port, reg, value) \
+ writel_relaxed((value), (port)->regs + SPI_##reg)
+#endif
+
+/* Use PIO for small transfers, avoiding the DMA setup/teardown overhead
+ * and cache operations; a better heuristic would also consider word size
+ * and bitrate.
+ */
+#define DMA_MIN_BYTES 16
+
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+#define AUTOSUSPEND_TIMEOUT 2000
+
+struct atmel_spi_dma {
+ struct dma_chan *chan_rx;
+ struct dma_chan *chan_tx;
+ struct scatterlist sgrx;
+ struct scatterlist sgtx;
+ struct dma_async_tx_descriptor *data_desc_rx;
+ struct dma_async_tx_descriptor *data_desc_tx;
+
+ struct at_dma_slave dma_slave;
+};
+
+struct atmel_spi_caps {
+ bool is_spi2;
+ bool has_wdrbt;
+ bool has_dma_support;
+};
+
+/*
+ * The core SPI transfer engine just talks to a register bank to set up
+ * DMA transfers; transfer queue progress is driven by IRQs. The clock
+ * framework provides the base clock, subdivided for each spi_device.
+ */
+struct atmel_spi {
+ spinlock_t lock;
+ unsigned long flags;
+
+ phys_addr_t phybase;
+ void __iomem *regs;
+ int irq;
+ struct clk *clk;
+ struct platform_device *pdev;
+
+ struct spi_transfer *current_transfer;
+ int current_remaining_bytes;
+ int done_status;
+
+ struct completion xfer_completion;
+
+ /* scratch buffer */
+ void *buffer;
+ dma_addr_t buffer_dma;
+
+ struct atmel_spi_caps caps;
+
+ bool use_dma;
+ bool use_pdc;
+ /* dmaengine data */
+ struct atmel_spi_dma dma;
+
+ bool keep_cs;
+ bool cs_active;
+};
+
+/* Controller-specific per-slave state */
+struct atmel_spi_device {
+ unsigned int npcs_pin;
+ u32 csr;
+};
+
+#define BUFFER_SIZE PAGE_SIZE
+#define INVALID_DMA_ADDRESS 0xffffffff
+
+/*
+ * Version 2 of the SPI controller has
+ * - CR.LASTXFER
+ * - SPI_MR.DIV32 may become FDIV or must-be-zero (here: always zero)
+ * - SPI_SR.TXEMPTY, SPI_SR.NSSR (and corresponding irqs)
+ * - SPI_CSRx.CSAAT
+ * - SPI_CSRx.SCBR allows faster clocking
+ */
+static bool atmel_spi_is_v2(struct atmel_spi *as)
+{
+ return as->caps.is_spi2;
+}
+
+/*
+ * Earlier SPI controllers (e.g. on at91rm9200) have a design bug whereby
+ * they assume that spi slave device state will not change on deselect, so
+ * that automagic deselection is OK. ("NPCSx rises if no data is to be
+ * transmitted") Not so! Workaround uses nCSx pins as GPIOs; or newer
+ * controllers have CSAAT and friends.
+ *
+ * Since the CSAAT functionality is a bit weird on newer controllers as
+ * well, we use GPIO to control nCSx pins on all controllers, updating
+ * MR.PCS to avoid confusing the controller. Using GPIOs also lets us
+ * support active-high chipselects despite the controller's belief that
+ * only active-low devices/systems exist.
+ *
+ * However, at91rm9200 has a second erratum whereby nCS0 doesn't work
+ * right when driven with GPIO. ("Mode Fault does not allow more than one
+ * Master on Chip Select 0.") No workaround exists for that ... so for
+ * nCS0 on that chip, we (a) don't use the GPIO, (b) can't support CS_HIGH,
+ * and (c) will trigger that first erratum in some cases.
+ */
+
+static void cs_activate(struct atmel_spi *as, struct spi_device *spi)
+{
+ struct atmel_spi_device *asd = spi->controller_state;
+ unsigned active = spi->mode & SPI_CS_HIGH;
+ u32 mr;
+
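+ /* MR.PCS, written below, is active-low one-hot: clearing bit n
+ * selects chip select n, and 0xf leaves all of them deasserted
+ */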
+ if (atmel_spi_is_v2(as)) {
+ spi_writel(as, CSR0 + 4 * spi->chip_select, asd->csr);
+ /* Also program CSR0: on older controller versions there is an issue
+ * where PDC transfers on CS1..3 need SPI_CSR0.BITS configured the
+ * same as SPI_CSR1..3.BITS.
+ */
+ spi_writel(as, CSR0, asd->csr);
+ if (as->caps.has_wdrbt) {
+ spi_writel(as, MR,
+ SPI_BF(PCS, ~(0x01 << spi->chip_select))
+ | SPI_BIT(WDRBT)
+ | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ } else {
+ spi_writel(as, MR,
+ SPI_BF(PCS, ~(0x01 << spi->chip_select))
+ | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ }
+
+ mr = spi_readl(as, MR);
+ gpio_set_value(asd->npcs_pin, active);
+ } else {
+ u32 cpol = (spi->mode & SPI_CPOL) ? SPI_BIT(CPOL) : 0;
+ int i;
+ u32 csr;
+
+ /* Make sure clock polarity is correct */
+ for (i = 0; i < spi->master->num_chipselect; i++) {
+ csr = spi_readl(as, CSR0 + 4 * i);
+ if ((csr ^ cpol) & SPI_BIT(CPOL))
+ spi_writel(as, CSR0 + 4 * i,
+ csr ^ SPI_BIT(CPOL));
+ }
+
+ mr = spi_readl(as, MR);
+ mr = SPI_BFINS(PCS, ~(1 << spi->chip_select), mr);
+ if (spi->chip_select != 0)
+ gpio_set_value(asd->npcs_pin, active);
+ spi_writel(as, MR, mr);
+ }
+
+ dev_dbg(&spi->dev, "activate %u%s, mr %08x\n",
+ asd->npcs_pin, active ? " (high)" : "",
+ mr);
+}
+
+static void cs_deactivate(struct atmel_spi *as, struct spi_device *spi)
+{
+ struct atmel_spi_device *asd = spi->controller_state;
+ unsigned active = spi->mode & SPI_CS_HIGH;
+ u32 mr;
+
+ /* only deactivate *this* device; sometimes transfers to
+ * another device may be active when this routine is called.
+ */
+ mr = spi_readl(as, MR);
+ if (~SPI_BFEXT(PCS, mr) & (1 << spi->chip_select)) {
+ mr = SPI_BFINS(PCS, 0xf, mr);
+ spi_writel(as, MR, mr);
+ }
+
+ dev_dbg(&spi->dev, "DEactivate %u%s, mr %08x\n",
+ asd->npcs_pin, active ? " (low)" : "",
+ mr);
+
+ if (atmel_spi_is_v2(as) || spi->chip_select != 0)
+ gpio_set_value(asd->npcs_pin, !active);
+}
+
+static void atmel_spi_lock(struct atmel_spi *as) __acquires(&as->lock)
+{
+ spin_lock_irqsave(&as->lock, as->flags);
+}
+
+static void atmel_spi_unlock(struct atmel_spi *as) __releases(&as->lock)
+{
+ spin_unlock_irqrestore(&as->lock, as->flags);
+}
+
+static inline bool atmel_spi_use_dma(struct atmel_spi *as,
+ struct spi_transfer *xfer)
+{
+ return as->use_dma && xfer->len >= DMA_MIN_BYTES;
+}
+
+static int atmel_spi_dma_slave_config(struct atmel_spi *as,
+ struct dma_slave_config *slave_config,
+ u8 bits_per_word)
+{
+ int err = 0;
+
+ if (bits_per_word > 8) {
+ slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ } else {
+ slave_config->dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config->src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ }
+
+ slave_config->dst_addr = (dma_addr_t)as->phybase + SPI_TDR;
+ slave_config->src_addr = (dma_addr_t)as->phybase + SPI_RDR;
+ slave_config->src_maxburst = 1;
+ slave_config->dst_maxburst = 1;
+ slave_config->device_fc = false;
+
+ slave_config->direction = DMA_MEM_TO_DEV;
+ if (dmaengine_slave_config(as->dma.chan_tx, slave_config)) {
+ dev_err(&as->pdev->dev,
+ "failed to configure tx dma channel\n");
+ err = -EINVAL;
+ }
+
+ slave_config->direction = DMA_DEV_TO_MEM;
+ if (dmaengine_slave_config(as->dma.chan_rx, slave_config)) {
+ dev_err(&as->pdev->dev,
+ "failed to configure rx dma channel\n");
+ err = -EINVAL;
+ }
+
+ return err;
+}
+
+static int atmel_spi_configure_dma(struct atmel_spi *as)
+{
+ struct dma_slave_config slave_config;
+ struct device *dev = &as->pdev->dev;
+ int err;
+
+ dma_cap_mask_t mask;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ as->dma.chan_tx = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(as->dma.chan_tx)) {
+ err = PTR_ERR(as->dma.chan_tx);
+ if (err == -EPROBE_DEFER) {
+ dev_warn(dev, "no DMA channel available at the moment\n");
+ return err;
+ }
+ dev_err(dev,
+ "DMA TX channel not available, SPI unable to use DMA\n");
+ err = -EBUSY;
+ goto error;
+ }
+
+ /*
+ * No reason to check EPROBE_DEFER here since we have already requested
+ * the tx channel. If it fails here, it's for another reason.
+ */
+ as->dma.chan_rx = dma_request_slave_channel(dev, "rx");
+
+ if (!as->dma.chan_rx) {
+ dev_err(dev,
+ "DMA RX channel not available, SPI unable to use DMA\n");
+ err = -EBUSY;
+ goto error;
+ }
+
+ err = atmel_spi_dma_slave_config(as, &slave_config, 8);
+ if (err)
+ goto error;
+
+ dev_info(&as->pdev->dev,
+ "Using %s (tx) and %s (rx) for DMA transfers\n",
+ dma_chan_name(as->dma.chan_tx),
+ dma_chan_name(as->dma.chan_rx));
+ return 0;
+error:
+ if (as->dma.chan_rx)
+ dma_release_channel(as->dma.chan_rx);
+ if (!IS_ERR(as->dma.chan_tx))
+ dma_release_channel(as->dma.chan_tx);
+ return err;
+}
+
+static void atmel_spi_stop_dma(struct atmel_spi *as)
+{
+ if (as->dma.chan_rx)
+ dmaengine_terminate_all(as->dma.chan_rx);
+ if (as->dma.chan_tx)
+ dmaengine_terminate_all(as->dma.chan_tx);
+}
+
+static void atmel_spi_release_dma(struct atmel_spi *as)
+{
+ if (as->dma.chan_rx)
+ dma_release_channel(as->dma.chan_rx);
+ if (as->dma.chan_tx)
+ dma_release_channel(as->dma.chan_tx);
+}
+
+/* This function is called by the DMA driver from tasklet context */
+static void dma_callback(void *data)
+{
+ struct spi_master *master = data;
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ complete(&as->xfer_completion);
+}
+
+/*
+ * Next transfer using PIO.
+ */
+static void atmel_spi_next_xfer_pio(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
+
+ dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_pio\n");
+
+ /* Make sure data is not remaining in RDR */
+ spi_readl(as, RDR);
+ while (spi_readl(as, SR) & SPI_BIT(RDRF)) {
+ spi_readl(as, RDR);
+ cpu_relax();
+ }
+
+ if (xfer->tx_buf) {
+ if (xfer->bits_per_word > 8)
+ spi_writel(as, TDR, *(u16 *)(xfer->tx_buf + xfer_pos));
+ else
+ spi_writel(as, TDR, *(u8 *)(xfer->tx_buf + xfer_pos));
+ } else {
+ spi_writel(as, TDR, 0);
+ }
+
+ dev_dbg(master->dev.parent,
+ " start pio xfer %p: len %u tx %p rx %p bitpw %d\n",
+ xfer, xfer->len, xfer->tx_buf, xfer->rx_buf,
+ xfer->bits_per_word);
+
+ /* Enable relevant interrupts */
+ spi_writel(as, IER, SPI_BIT(RDRF) | SPI_BIT(OVRES));
+}
+
+/*
+ * Submit next transfer for DMA.
+ */
+static int atmel_spi_next_xfer_dma_submit(struct spi_master *master,
+ struct spi_transfer *xfer,
+ u32 *plen)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ struct dma_chan *rxchan = as->dma.chan_rx;
+ struct dma_chan *txchan = as->dma.chan_tx;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+ struct dma_slave_config slave_config;
+ dma_cookie_t cookie;
+ u32 len = *plen;
+
+ dev_vdbg(master->dev.parent, "atmel_spi_next_xfer_dma_submit\n");
+
+ /* Check that the channels are available */
+ if (!rxchan || !txchan)
+ return -ENODEV;
+
+ /* release lock for DMA operations */
+ atmel_spi_unlock(as);
+
+ /* prepare the RX dma transfer */
+ sg_init_table(&as->dma.sgrx, 1);
+ if (xfer->rx_buf) {
+ as->dma.sgrx.dma_address = xfer->rx_dma + xfer->len - *plen;
+ } else {
+ as->dma.sgrx.dma_address = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ }
+
+ /* prepare the TX dma transfer */
+ sg_init_table(&as->dma.sgtx, 1);
+ if (xfer->tx_buf) {
+ as->dma.sgtx.dma_address = xfer->tx_dma + xfer->len - *plen;
+ } else {
+ as->dma.sgtx.dma_address = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ memset(as->buffer, 0, len);
+ }
+
+ sg_dma_len(&as->dma.sgtx) = len;
+ sg_dma_len(&as->dma.sgrx) = len;
+
+ *plen = len;
+
+ if (atmel_spi_dma_slave_config(as, &slave_config, 8))
+ goto err_exit;
+
+ /* Send both scatterlists */
+ rxdesc = dmaengine_prep_slave_sg(rxchan, &as->dma.sgrx, 1,
+ DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto err_dma;
+
+ txdesc = dmaengine_prep_slave_sg(txchan, &as->dma.sgtx, 1,
+ DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ goto err_dma;
+
+ dev_dbg(master->dev.parent,
+ " start dma xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf, (unsigned long long)xfer->tx_dma,
+ xfer->rx_buf, (unsigned long long)xfer->rx_dma);
+
+ /* Enable relevant interrupts */
+ spi_writel(as, IER, SPI_BIT(OVRES));
+
+ /* Put the callback on the RX transfer only, that should finish last */
+ rxdesc->callback = dma_callback;
+ rxdesc->callback_param = master;
+
+ /* Submit and fire RX and TX with TX last so we're ready to read! */
+ cookie = rxdesc->tx_submit(rxdesc);
+ if (dma_submit_error(cookie))
+ goto err_dma;
+ cookie = txdesc->tx_submit(txdesc);
+ if (dma_submit_error(cookie))
+ goto err_dma;
+ rxchan->device->device_issue_pending(rxchan);
+ txchan->device->device_issue_pending(txchan);
+
+ /* take back lock */
+ atmel_spi_lock(as);
+ return 0;
+
+err_dma:
+ spi_writel(as, IDR, SPI_BIT(OVRES));
+ atmel_spi_stop_dma(as);
+err_exit:
+ atmel_spi_lock(as);
+ return -ENOMEM;
+}
+
+static void atmel_spi_next_xfer_data(struct spi_master *master,
+ struct spi_transfer *xfer,
+ dma_addr_t *tx_dma,
+ dma_addr_t *rx_dma,
+ u32 *plen)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 len = *plen;
+
+ /* use scratch buffer only when rx or tx data is unspecified */
+ if (xfer->rx_buf)
+ *rx_dma = xfer->rx_dma + xfer->len - *plen;
+ else {
+ *rx_dma = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ }
+
+ if (xfer->tx_buf)
+ *tx_dma = xfer->tx_dma + xfer->len - *plen;
+ else {
+ *tx_dma = as->buffer_dma;
+ if (len > BUFFER_SIZE)
+ len = BUFFER_SIZE;
+ memset(as->buffer, 0, len);
+ dma_sync_single_for_device(&as->pdev->dev,
+ as->buffer_dma, len, DMA_TO_DEVICE);
+ }
+
+ *plen = len;
+}
+
+static int atmel_spi_set_xfer_speed(struct atmel_spi *as,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ u32 scbr, csr;
+ unsigned long bus_hz;
+
+ /* v1 chips start out at half the peripheral bus speed. */
+ bus_hz = clk_get_rate(as->clk);
+ if (!atmel_spi_is_v2(as))
+ bus_hz /= 2;
+
+ /*
+ * Calculate the lowest divider that satisfies the
+ * constraint, assuming div32/fdiv/mbz == 0.
+ */
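+ /* spiclk = bus_hz / SCBR, so rounding the divider up keeps the
+ * rate at or below what was requested
+ */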
+ if (xfer->speed_hz)
+ scbr = DIV_ROUND_UP(bus_hz, xfer->speed_hz);
+ else
+ /*
+ * This can happen if max_speed is zero.
+ * In that case, set the lowest possible speed.
+ */
+ scbr = 0xff;
+
+ /*
+ * If the resulting divider doesn't fit into the
+ * register bitfield, we can't satisfy the constraint.
+ */
+ if (scbr >= (1 << SPI_SCBR_SIZE)) {
+ dev_err(&spi->dev,
+ "setup: %d Hz too slow, scbr %u; min %ld Hz\n",
+ xfer->speed_hz, scbr, bus_hz/255);
+ return -EINVAL;
+ }
+ if (scbr == 0) {
+ dev_err(&spi->dev,
+ "setup: %d Hz too high, scbr %u; max %ld Hz\n",
+ xfer->speed_hz, scbr, bus_hz);
+ return -EINVAL;
+ }
+ csr = spi_readl(as, CSR0 + 4 * spi->chip_select);
+ csr = SPI_BFINS(SCBR, scbr, csr);
+ spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
+
+ return 0;
+}
+
+/*
+ * Submit next transfer for PDC.
+ * lock is held, spi irq is blocked
+ */
+static void atmel_spi_pdc_next_xfer(struct spi_master *master,
+ struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 len;
+ dma_addr_t tx_dma, rx_dma;
+
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+
+ len = as->current_remaining_bytes;
+ atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ as->current_remaining_bytes -= len;
+
+ spi_writel(as, RPR, rx_dma);
+ spi_writel(as, TPR, tx_dma);
+
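+ /* the PDC counters count transfers, not bytes: halve the byte
+ * count for words wider than 8 bits (two bytes per transfer)
+ */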
+ if (msg->spi->bits_per_word > 8)
+ len >>= 1;
+ spi_writel(as, RCR, len);
+ spi_writel(as, TCR, len);
+
+ dev_dbg(&msg->spi->dev,
+ " start xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf,
+ (unsigned long long)xfer->tx_dma, xfer->rx_buf,
+ (unsigned long long)xfer->rx_dma);
+
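+ /* queue a second buffer in the PDC "next" registers (RNPR/RNCR,
+ * TNPR/TNCR) so the controller chains into it without CPU help
+ */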
+ if (as->current_remaining_bytes) {
+ len = as->current_remaining_bytes;
+ atmel_spi_next_xfer_data(master, xfer, &tx_dma, &rx_dma, &len);
+ as->current_remaining_bytes -= len;
+
+ spi_writel(as, RNPR, rx_dma);
+ spi_writel(as, TNPR, tx_dma);
+
+ if (msg->spi->bits_per_word > 8)
+ len >>= 1;
+ spi_writel(as, RNCR, len);
+ spi_writel(as, TNCR, len);
+
+ dev_dbg(&msg->spi->dev,
+ " next xfer %p: len %u tx %p/%08llx rx %p/%08llx\n",
+ xfer, xfer->len, xfer->tx_buf,
+ (unsigned long long)xfer->tx_dma, xfer->rx_buf,
+ (unsigned long long)xfer->rx_dma);
+ }
+
+ /* REVISIT: We're waiting for RXBUFF before we start the next
+ * transfer because we need to handle some difficult timing
+ * issues otherwise. If we wait for TXBUFE in one transfer and
+ * then start waiting for RXBUFF in the next, it's difficult
+ * to tell the difference between the RXBUFF interrupt we're
+ * actually waiting for and the RXBUFF interrupt of the
+ * previous transfer.
+ *
+ * It should be doable, though. Just not now...
+ */
+ spi_writel(as, IER, SPI_BIT(RXBUFF) | SPI_BIT(OVRES));
+ spi_writel(as, PTCR, SPI_BIT(TXTEN) | SPI_BIT(RXTEN));
+}
+
+/*
+ * For DMA, tx_buf/tx_dma have the same relationship as rx_buf/rx_dma:
+ * - The buffer is either valid for CPU access, else NULL
+ * - If the buffer is valid, so is its DMA address
+ *
+ * This driver manages the dma address unless message->is_dma_mapped.
+ */
+static int
+atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
+{
+ struct device *dev = &as->pdev->dev;
+
+ xfer->tx_dma = xfer->rx_dma = INVALID_DMA_ADDRESS;
+ if (xfer->tx_buf) {
+ /* tx_buf is a const void* where we need a void * for the dma
+ * mapping */
+ void *nonconst_tx = (void *)xfer->tx_buf;
+
+ xfer->tx_dma = dma_map_single(dev,
+ nonconst_tx, xfer->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, xfer->tx_dma))
+ return -ENOMEM;
+ }
+ if (xfer->rx_buf) {
+ xfer->rx_dma = dma_map_single(dev,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, xfer->rx_dma)) {
+ if (xfer->tx_buf)
+ dma_unmap_single(dev,
+ xfer->tx_dma, xfer->len,
+ DMA_TO_DEVICE);
+ return -ENOMEM;
+ }
+ }
+ return 0;
+}
+
+static void atmel_spi_dma_unmap_xfer(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ if (xfer->tx_dma != INVALID_DMA_ADDRESS)
+ dma_unmap_single(master->dev.parent, xfer->tx_dma,
+ xfer->len, DMA_TO_DEVICE);
+ if (xfer->rx_dma != INVALID_DMA_ADDRESS)
+ dma_unmap_single(master->dev.parent, xfer->rx_dma,
+ xfer->len, DMA_FROM_DEVICE);
+}
+
+static void atmel_spi_disable_pdc_transfer(struct atmel_spi *as)
+{
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+}
+
+/* Called from IRQ
+ *
+ * Must update "current_remaining_bytes" to keep track of data
+ * to transfer.
+ */
+static void
+atmel_spi_pump_pio_data(struct atmel_spi *as, struct spi_transfer *xfer)
+{
+ u8 *rxp;
+ u16 *rxp16;
+ unsigned long xfer_pos = xfer->len - as->current_remaining_bytes;
+
+ if (xfer->rx_buf) {
+ if (xfer->bits_per_word > 8) {
+ rxp16 = (u16 *)(((u8 *)xfer->rx_buf) + xfer_pos);
+ *rxp16 = spi_readl(as, RDR);
+ } else {
+ rxp = ((u8 *)xfer->rx_buf) + xfer_pos;
+ *rxp = spi_readl(as, RDR);
+ }
+ } else {
+ spi_readl(as, RDR);
+ }
+ if (xfer->bits_per_word > 8) {
+ if (as->current_remaining_bytes > 2)
+ as->current_remaining_bytes -= 2;
+ else
+ as->current_remaining_bytes = 0;
+ } else {
+ as->current_remaining_bytes--;
+ }
+}
+
+/* Interrupt
+ *
+ * No need for locking in this Interrupt handler: done_status is the
+ * only information modified.
+ */
+static irqreturn_t
+atmel_spi_pio_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 status, pending, imr;
+ struct spi_transfer *xfer;
+ int ret = IRQ_NONE;
+
+ imr = spi_readl(as, IMR);
+ status = spi_readl(as, SR);
+ pending = status & imr;
+
+ if (pending & SPI_BIT(OVRES)) {
+ ret = IRQ_HANDLED;
+ spi_writel(as, IDR, SPI_BIT(OVRES));
+ dev_warn(master->dev.parent, "overrun\n");
+
+ /*
+ * When we get an overrun, we disregard the current
+ * transfer. Data will not be copied back from any
+ * bounce buffer and msg->actual_len will not be
+ * updated with the last xfer.
+ *
+ * We will also not process any remaining transfers in
+ * the message.
+ */
+ as->done_status = -EIO;
+ smp_wmb();
+
+ /* Clear any overrun happening while cleaning up */
+ spi_readl(as, SR);
+
+ complete(&as->xfer_completion);
+
+ } else if (pending & SPI_BIT(RDRF)) {
+ atmel_spi_lock(as);
+
+ if (as->current_remaining_bytes) {
+ ret = IRQ_HANDLED;
+ xfer = as->current_transfer;
+ atmel_spi_pump_pio_data(as, xfer);
+ if (!as->current_remaining_bytes)
+ spi_writel(as, IDR, pending);
+
+ complete(&as->xfer_completion);
+ }
+
+ atmel_spi_unlock(as);
+ } else {
+ WARN_ONCE(pending, "IRQ not handled, pending = %x\n", pending);
+ ret = IRQ_HANDLED;
+ spi_writel(as, IDR, pending);
+ }
+
+ return ret;
+}
+
+static irqreturn_t
+atmel_spi_pdc_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct atmel_spi *as = spi_master_get_devdata(master);
+ u32 status, pending, imr;
+ int ret = IRQ_NONE;
+
+ imr = spi_readl(as, IMR);
+ status = spi_readl(as, SR);
+ pending = status & imr;
+
+ if (pending & SPI_BIT(OVRES)) {
+
+ ret = IRQ_HANDLED;
+
+ spi_writel(as, IDR, (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX)
+ | SPI_BIT(OVRES)));
+
+ /* Clear any overrun happening while cleaning up */
+ spi_readl(as, SR);
+
+ as->done_status = -EIO;
+
+ complete(&as->xfer_completion);
+
+ } else if (pending & (SPI_BIT(RXBUFF) | SPI_BIT(ENDRX))) {
+ ret = IRQ_HANDLED;
+
+ spi_writel(as, IDR, pending);
+
+ complete(&as->xfer_completion);
+ }
+
+ return ret;
+}
+
+static int atmel_spi_setup(struct spi_device *spi)
+{
+ struct atmel_spi *as;
+ struct atmel_spi_device *asd;
+ u32 csr;
+ unsigned int bits = spi->bits_per_word;
+ unsigned int npcs_pin;
+ int ret;
+
+ as = spi_master_get_devdata(spi->master);
+
+ /* see notes above re chipselect */
+ if (!atmel_spi_is_v2(as)
+ && spi->chip_select == 0
+ && (spi->mode & SPI_CS_HIGH)) {
+ dev_dbg(&spi->dev, "setup: can't be active-high\n");
+ return -EINVAL;
+ }
+
+ csr = SPI_BF(BITS, bits - 8);
+ if (spi->mode & SPI_CPOL)
+ csr |= SPI_BIT(CPOL);
+ if (!(spi->mode & SPI_CPHA))
+ csr |= SPI_BIT(NCPHA);
+
+ /* DLYBS is mostly irrelevant since we manage chipselect using GPIOs.
+ *
+ * DLYBCT would add delays between words, slowing down transfers.
+ * It could potentially be useful to cope with DMA bottlenecks, but
+ * in those cases it's probably best to just use a lower bitrate.
+ */
+ csr |= SPI_BF(DLYBS, 0);
+ csr |= SPI_BF(DLYBCT, 0);
+
+ /* chipselect must have been muxed as GPIO (e.g. in board setup) */
+ npcs_pin = (unsigned long)spi->controller_data;
+
+ if (gpio_is_valid(spi->cs_gpio))
+ npcs_pin = spi->cs_gpio;
+
+ asd = spi->controller_state;
+ if (!asd) {
+ asd = kzalloc(sizeof(struct atmel_spi_device), GFP_KERNEL);
+ if (!asd)
+ return -ENOMEM;
+
+ ret = gpio_request(npcs_pin, dev_name(&spi->dev));
+ if (ret) {
+ kfree(asd);
+ return ret;
+ }
+
+ asd->npcs_pin = npcs_pin;
+ spi->controller_state = asd;
+ gpio_direction_output(npcs_pin, !(spi->mode & SPI_CS_HIGH));
+ }
+
+ asd->csr = csr;
+
+ dev_dbg(&spi->dev,
+ "setup: bpw %u mode 0x%x -> csr%d %08x\n",
+ bits, spi->mode, spi->chip_select, csr);
+
+ if (!atmel_spi_is_v2(as))
+ spi_writel(as, CSR0 + 4 * spi->chip_select, csr);
+
+ return 0;
+}
+
+static int atmel_spi_one_transfer(struct spi_master *master,
+ struct spi_message *msg,
+ struct spi_transfer *xfer)
+{
+ struct atmel_spi *as;
+ struct spi_device *spi = msg->spi;
+ u8 bits;
+ u32 len;
+ struct atmel_spi_device *asd;
+ int timeout;
+ int ret;
+ unsigned long dma_timeout;
+
+ as = spi_master_get_devdata(master);
+
+ if (!(xfer->tx_buf || xfer->rx_buf) && xfer->len) {
+ dev_dbg(&spi->dev, "missing rx or tx buf\n");
+ return -EINVAL;
+ }
+
+ if (xfer->bits_per_word) {
+ asd = spi->controller_state;
+ bits = SPI_BFEXT(BITS, asd->csr);
+ if (bits != xfer->bits_per_word - 8) {
+ dev_dbg(&spi->dev,
+ "you can't yet change bits_per_word in transfers\n");
+ return -ENOPROTOOPT;
+ }
+ }
+
+ /*
+ * DMA map early, for performance (empties dcache ASAP) and
+ * better fault reporting.
+ */
+ if ((!msg->is_dma_mapped)
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc)) {
+ if (atmel_spi_dma_map_xfer(as, xfer) < 0)
+ return -ENOMEM;
+ }
+
+ ret = atmel_spi_set_xfer_speed(as, msg->spi, xfer);
+ if (ret) {
+ /* undo the DMA mapping done above before bailing out */
+ if (!msg->is_dma_mapped
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ atmel_spi_dma_unmap_xfer(master, xfer);
+ return ret;
+ }
+
+ as->done_status = 0;
+ as->current_transfer = xfer;
+ as->current_remaining_bytes = xfer->len;
+ while (as->current_remaining_bytes) {
+ reinit_completion(&as->xfer_completion);
+
+ if (as->use_pdc) {
+ atmel_spi_pdc_next_xfer(master, msg, xfer);
+ } else if (atmel_spi_use_dma(as, xfer)) {
+ len = as->current_remaining_bytes;
+ ret = atmel_spi_next_xfer_dma_submit(master,
+ xfer, &len);
+ if (ret) {
+ dev_err(&spi->dev,
+ "unable to use DMA, fallback to PIO\n");
+ atmel_spi_next_xfer_pio(master, xfer);
+ } else {
+ as->current_remaining_bytes -= len;
+ if (as->current_remaining_bytes < 0)
+ as->current_remaining_bytes = 0;
+ }
+ } else {
+ atmel_spi_next_xfer_pio(master, xfer);
+ }
+
+ /* interrupts are disabled, so free the lock for schedule */
+ atmel_spi_unlock(as);
+ dma_timeout = wait_for_completion_timeout(&as->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ atmel_spi_lock(as);
+ if (WARN_ON(dma_timeout == 0)) {
+ dev_err(&spi->dev, "spi transfer timeout\n");
+ as->done_status = -EIO;
+ }
+
+ if (as->done_status)
+ break;
+ }
+
+ if (as->done_status) {
+ if (as->use_pdc) {
+ dev_warn(master->dev.parent,
+ "overrun (%u/%u remaining)\n",
+ spi_readl(as, TCR), spi_readl(as, RCR));
+
+ /*
+ * Clean up DMA registers and make sure the data
+ * registers are empty.
+ */
+ spi_writel(as, RNCR, 0);
+ spi_writel(as, TNCR, 0);
+ spi_writel(as, RCR, 0);
+ spi_writel(as, TCR, 0);
+ for (timeout = 1000; timeout; timeout--)
+ if (spi_readl(as, SR) & SPI_BIT(TXEMPTY))
+ break;
+ if (!timeout)
+ dev_warn(master->dev.parent,
+ "timeout waiting for TXEMPTY");
+ while (spi_readl(as, SR) & SPI_BIT(RDRF))
+ spi_readl(as, RDR);
+
+ /* Clear any overrun happening while cleaning up */
+ spi_readl(as, SR);
+
+ } else if (atmel_spi_use_dma(as, xfer)) {
+ atmel_spi_stop_dma(as);
+ }
+
+ if (!msg->is_dma_mapped
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ atmel_spi_dma_unmap_xfer(master, xfer);
+
+ return 0;
+
+ } else {
+ /* only update length if no error */
+ msg->actual_length += xfer->len;
+ }
+
+ if (!msg->is_dma_mapped
+ && (atmel_spi_use_dma(as, xfer) || as->use_pdc))
+ atmel_spi_dma_unmap_xfer(master, xfer);
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change) {
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ as->keep_cs = true;
+ } else {
+ as->cs_active = !as->cs_active;
+ if (as->cs_active)
+ cs_activate(as, msg->spi);
+ else
+ cs_deactivate(as, msg->spi);
+ }
+ }
+
+ return 0;
+}
+
+static int atmel_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct atmel_spi *as;
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret = 0;
+
+ as = spi_master_get_devdata(master);
+
+ dev_dbg(&spi->dev, "new message %p submitted for %s\n",
+ msg, dev_name(&spi->dev));
+
+ atmel_spi_lock(as);
+ cs_activate(as, spi);
+
+ as->cs_active = true;
+ as->keep_cs = false;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ ret = atmel_spi_one_transfer(master, msg, xfer);
+ if (ret)
+ goto msg_done;
+ }
+
+ if (as->use_pdc)
+ atmel_spi_disable_pdc_transfer(as);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ dev_dbg(&spi->dev,
+ " xfer %p: len %u tx %p/%pad rx %p/%pad\n",
+ xfer, xfer->len,
+ xfer->tx_buf, &xfer->tx_dma,
+ xfer->rx_buf, &xfer->rx_dma);
+ }
+
+msg_done:
+ if (!as->keep_cs)
+ cs_deactivate(as, msg->spi);
+
+ atmel_spi_unlock(as);
+
+ msg->status = as->done_status;
+ spi_finalize_current_message(spi->master);
+
+ return ret;
+}
+
+static void atmel_spi_cleanup(struct spi_device *spi)
+{
+ struct atmel_spi_device *asd = spi->controller_state;
+
+ if (!asd)
+ return;
+
+ spi->controller_state = NULL;
+ gpio_free(asd->npcs_pin);
+ kfree(asd);
+}
+
+static inline unsigned int atmel_get_version(struct atmel_spi *as)
+{
+ return spi_readl(as, VERSION) & 0x00000fff;
+}
+
+static void atmel_get_caps(struct atmel_spi *as)
+{
+ unsigned int version;
+
+ version = atmel_get_version(as);
+ dev_info(&as->pdev->dev, "version: 0x%x\n", version);
+
+ as->caps.is_spi2 = version > 0x121;
+ as->caps.has_wdrbt = version >= 0x210;
+ as->caps.has_dma_support = version >= 0x212;
+}
+
+/*-------------------------------------------------------------------------*/
+
+static int atmel_spi_probe(struct platform_device *pdev)
+{
+ struct resource *regs;
+ int irq;
+ struct clk *clk;
+ int ret;
+ struct spi_master *master;
+ struct atmel_spi *as;
+
+ /* Select default pin state */
+ pinctrl_pm_select_default_state(&pdev->dev);
+
+ regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!regs)
+ return -ENXIO;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ clk = devm_clk_get(&pdev->dev, "spi_clk");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ /* setup spi core then atmel-specific driver state */
+ ret = -ENOMEM;
+ master = spi_alloc_master(&pdev->dev, sizeof(*as));
+ if (!master)
+ goto out_free;
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+ master->num_chipselect = master->dev.of_node ? 0 : 4;
+ master->setup = atmel_spi_setup;
+ master->transfer_one_message = atmel_spi_transfer_one_message;
+ master->cleanup = atmel_spi_cleanup;
+ master->auto_runtime_pm = true;
+ platform_set_drvdata(pdev, master);
+
+ as = spi_master_get_devdata(master);
+
+ /*
+ * Scratch buffer is used for throwaway rx and tx data.
+ * It's coherent to minimize dcache pollution.
+ */
+ as->buffer = dma_alloc_coherent(&pdev->dev, BUFFER_SIZE,
+ &as->buffer_dma, GFP_KERNEL);
+ if (!as->buffer)
+ goto out_free;
+
+ spin_lock_init(&as->lock);
+
+ as->pdev = pdev;
+ as->regs = devm_ioremap_resource(&pdev->dev, regs);
+ if (IS_ERR(as->regs)) {
+ ret = PTR_ERR(as->regs);
+ goto out_free_buffer;
+ }
+ as->phybase = regs->start;
+ as->irq = irq;
+ as->clk = clk;
+
+ init_completion(&as->xfer_completion);
+
+ atmel_get_caps(as);
+
+ as->use_dma = false;
+ as->use_pdc = false;
+ if (as->caps.has_dma_support) {
+ ret = atmel_spi_configure_dma(as);
+ if (ret == 0)
+ as->use_dma = true;
+ else if (ret == -EPROBE_DEFER)
+ return ret;
+ } else {
+ as->use_pdc = true;
+ }
+
+ if (as->caps.has_dma_support && !as->use_dma)
+ dev_info(&pdev->dev, "Atmel SPI Controller using PIO only\n");
+
+ if (as->use_pdc) {
+ ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pdc_interrupt,
+ 0, dev_name(&pdev->dev), master);
+ } else {
+ ret = devm_request_irq(&pdev->dev, irq, atmel_spi_pio_interrupt,
+ 0, dev_name(&pdev->dev), master);
+ }
+ if (ret)
+ goto out_unmap_regs;
+
+ /* Initialize the hardware */
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto out_free_irq;
+ spi_writel(as, CR, SPI_BIT(SWRST));
+ spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
+ if (as->caps.has_wdrbt) {
+ spi_writel(as, MR, SPI_BIT(WDRBT) | SPI_BIT(MODFDIS)
+ | SPI_BIT(MSTR));
+ } else {
+ spi_writel(as, MR, SPI_BIT(MSTR) | SPI_BIT(MODFDIS));
+ }
+
+ if (as->use_pdc)
+ spi_writel(as, PTCR, SPI_BIT(RXTDIS) | SPI_BIT(TXTDIS));
+ spi_writel(as, CR, SPI_BIT(SPIEN));
+
+ /* go! */
+ dev_info(&pdev->dev, "Atmel SPI Controller at 0x%08lx (irq %d)\n",
+ (unsigned long)regs->start, irq);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret)
+ goto out_free_dma;
+
+ return 0;
+
+out_free_dma:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ if (as->use_dma)
+ atmel_spi_release_dma(as);
+
+ spi_writel(as, CR, SPI_BIT(SWRST));
+ spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
+ clk_disable_unprepare(clk);
+out_free_irq:
+out_unmap_regs:
+out_free_buffer:
+ dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
+ as->buffer_dma);
+out_free:
+ spi_master_put(master);
+ return ret;
+}
+
+static int atmel_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* reset the hardware and block queue progress */
+ spin_lock_irq(&as->lock);
+ if (as->use_dma) {
+ atmel_spi_stop_dma(as);
+ atmel_spi_release_dma(as);
+ }
+
+ spi_writel(as, CR, SPI_BIT(SWRST));
+ spi_writel(as, CR, SPI_BIT(SWRST)); /* AT91SAM9263 Rev B workaround */
+ spi_readl(as, SR);
+ spin_unlock_irq(&as->lock);
+
+ dma_free_coherent(&pdev->dev, BUFFER_SIZE, as->buffer,
+ as->buffer_dma);
+
+ clk_disable_unprepare(as->clk);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int atmel_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(as->clk);
+ pinctrl_pm_select_sleep_state(dev);
+
+ return 0;
+}
+
+static int atmel_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct atmel_spi *as = spi_master_get_devdata(master);
+
+ pinctrl_pm_select_default_state(dev);
+
+ return clk_prepare_enable(as->clk);
+}
+
+static int atmel_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ /* Stop the queue running */
+ ret = spi_master_suspend(master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
+ }
+
+ if (!pm_runtime_suspended(dev))
+ atmel_spi_runtime_suspend(dev);
+
+ return 0;
+}
+
+static int atmel_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = atmel_spi_runtime_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ /* Start the queue running */
+ ret = spi_master_resume(master);
+ if (ret)
+ dev_err(dev, "problem starting queue (%d)\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops atmel_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(atmel_spi_suspend, atmel_spi_resume)
+ SET_RUNTIME_PM_OPS(atmel_spi_runtime_suspend,
+ atmel_spi_runtime_resume, NULL)
+};
+#define ATMEL_SPI_PM_OPS (&atmel_spi_pm_ops)
+#else
+#define ATMEL_SPI_PM_OPS NULL
+#endif
+
+#if defined(CONFIG_OF)
+static const struct of_device_id atmel_spi_dt_ids[] = {
+ { .compatible = "atmel,at91rm9200-spi" },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, atmel_spi_dt_ids);
+#endif
+
+static struct platform_driver atmel_spi_driver = {
+ .driver = {
+ .name = "atmel_spi",
+ .pm = ATMEL_SPI_PM_OPS,
+ .of_match_table = of_match_ptr(atmel_spi_dt_ids),
+ },
+ .probe = atmel_spi_probe,
+ .remove = atmel_spi_remove,
+};
+module_platform_driver(atmel_spi_driver);
+
+MODULE_DESCRIPTION("Atmel AT32/AT91 SPI Controller driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:atmel_spi");
diff --git a/drivers/spi/spi-au1550.c b/drivers/spi/spi-au1550.c
new file mode 100644
index 000000000..f45e085c0
--- /dev/null
+++ b/drivers/spi/spi-au1550.c
@@ -0,0 +1,1002 @@
+/*
+ * au1550 psc spi controller driver
+ * may also work with the au1200, au1210 and au1250
+ * will not work on the au1000, au1100 or au1500 (no full SPI controller there)
+ *
+ * Copyright (c) 2006 ATRON electronic GmbH
+ * Author: Jan Nikitenko <jan.nikitenko@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/resource.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/dma-mapping.h>
+#include <linux/completion.h>
+#include <asm/mach-au1x00/au1000.h>
+#include <asm/mach-au1x00/au1xxx_psc.h>
+#include <asm/mach-au1x00/au1xxx_dbdma.h>
+
+#include <asm/mach-au1x00/au1550_spi.h>
+
+static unsigned usedma = 1;
+module_param(usedma, uint, 0644);
+
+/*
+#define AU1550_SPI_DEBUG_LOOPBACK
+*/
+
+#define AU1550_SPI_DBDMA_DESCRIPTORS 1
+#define AU1550_SPI_DMA_RXTMP_MINSIZE 2048U
+
+struct au1550_spi {
+ struct spi_bitbang bitbang;
+
+ volatile psc_spi_t __iomem *regs;
+ int irq;
+
+ unsigned len;
+ unsigned tx_count;
+ unsigned rx_count;
+ const u8 *tx;
+ u8 *rx;
+
+ void (*rx_word)(struct au1550_spi *hw);
+ void (*tx_word)(struct au1550_spi *hw);
+ int (*txrx_bufs)(struct spi_device *spi, struct spi_transfer *t);
+ irqreturn_t (*irq_callback)(struct au1550_spi *hw);
+
+ struct completion master_done;
+
+ unsigned usedma;
+ u32 dma_tx_id;
+ u32 dma_rx_id;
+ u32 dma_tx_ch;
+ u32 dma_rx_ch;
+
+ u8 *dma_rx_tmpbuf;
+ unsigned dma_rx_tmpbuf_size;
+ u32 dma_rx_tmpbuf_addr;
+
+ struct spi_master *master;
+ struct device *dev;
+ struct au1550_spi_info *pdata;
+ struct resource *ioarea;
+};
+
+
+/* we use an 8-bit memory device for dma transfers to/from spi fifo */
+static dbdev_tab_t au1550_spi_mem_dbdev =
+{
+ .dev_id = DBDMA_MEM_CHAN,
+ .dev_flags = DEV_FLAGS_ANYUSE|DEV_FLAGS_SYNC,
+ .dev_tsize = 0,
+ .dev_devwidth = 8,
+ .dev_physaddr = 0x00000000,
+ .dev_intlevel = 0,
+ .dev_intpolarity = 0
+};
+
+static int ddma_memid; /* id to above mem dma device */
+
+static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw);
+
+
+/*
+ * compute BRG and DIV bits to setup spi clock based on main input clock rate
+ * that was specified in platform data structure
+ * according to au1550 datasheet:
+ * psc_tempclk = psc_mainclk / (2 << DIV)
+ * spiclk = psc_tempclk / (2 * (BRG + 1))
+ * BRG valid range is 4..63
+ * DIV valid range is 0..3
+ */
+static u32 au1550_spi_baudcfg(struct au1550_spi *hw, unsigned speed_hz)
+{
+ u32 mainclk_hz = hw->pdata->mainclk_hz;
+ u32 div, brg;
+
+ for (div = 0; div < 4; div++) {
+ brg = mainclk_hz / speed_hz / (4 << div);
+ /* now we have BRG+1 in brg, so count with that */
+ if (brg < (4 + 1)) {
+ brg = (4 + 1); /* speed_hz too big */
+ break; /* set lowest brg (div is == 0) */
+ }
+ if (brg <= (63 + 1))
+ break; /* we have valid brg and div */
+ }
+ if (div == 4) {
+ div = 3; /* speed_hz too small */
+ brg = (63 + 1); /* set highest brg and div */
+ }
+ brg--;
+ return PSC_SPICFG_SET_BAUD(brg) | PSC_SPICFG_SET_DIV(div);
+}
+
+static inline void au1550_spi_mask_ack_all(struct au1550_spi *hw)
+{
+ hw->regs->psc_spimsk =
+ PSC_SPIMSK_MM | PSC_SPIMSK_RR | PSC_SPIMSK_RO
+ | PSC_SPIMSK_RU | PSC_SPIMSK_TR | PSC_SPIMSK_TO
+ | PSC_SPIMSK_TU | PSC_SPIMSK_SD | PSC_SPIMSK_MD;
+ wmb(); /* drain writebuffer */
+
+ hw->regs->psc_spievent =
+ PSC_SPIEVNT_MM | PSC_SPIEVNT_RR | PSC_SPIEVNT_RO
+ | PSC_SPIEVNT_RU | PSC_SPIEVNT_TR | PSC_SPIEVNT_TO
+ | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD | PSC_SPIEVNT_MD;
+ wmb(); /* drain writebuffer */
+}
+
+static void au1550_spi_reset_fifos(struct au1550_spi *hw)
+{
+ u32 pcr;
+
+ hw->regs->psc_spipcr = PSC_SPIPCR_RC | PSC_SPIPCR_TC;
+ wmb(); /* drain writebuffer */
+ do {
+ pcr = hw->regs->psc_spipcr;
+ wmb(); /* drain writebuffer */
+ } while (pcr != 0);
+}
+
+/*
+ * dma transfers are used for the most common spi word size of 8 bits
+ * we cannot easily change the width of already set up dma channels, so if we
+ * wanted dma support for more than 8-bit words (up to 24 bits), we would need
+ * to set up dma channels from scratch on each spi transfer, based on
+ * bits_per_word; instead we have pre-set-up 8-bit dma channels that cover
+ * spi transfers of 4 to 8 bits, while 9 to 24 bit transfers are done in pio
+ * irq-based mode
+ * callbacks to handle dma or pio are set up in au1550_spi_bits_handlers_set()
+ */
+static void au1550_spi_chipsel(struct spi_device *spi, int value)
+{
+ struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ unsigned cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
+ u32 cfg, stat;
+
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ if (hw->pdata->deactivate_cs)
+ hw->pdata->deactivate_cs(hw->pdata, spi->chip_select,
+ cspol);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
+
+ cfg = hw->regs->psc_spicfg;
+ wmb(); /* drain writebuffer */
+ hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
+ wmb(); /* drain writebuffer */
+
+ if (spi->mode & SPI_CPOL)
+ cfg |= PSC_SPICFG_BI;
+ else
+ cfg &= ~PSC_SPICFG_BI;
+ if (spi->mode & SPI_CPHA)
+ cfg &= ~PSC_SPICFG_CDE;
+ else
+ cfg |= PSC_SPICFG_CDE;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ cfg |= PSC_SPICFG_MLF;
+ else
+ cfg &= ~PSC_SPICFG_MLF;
+
+ if (hw->usedma && spi->bits_per_word <= 8)
+ cfg &= ~PSC_SPICFG_DD_DISABLE;
+ else
+ cfg |= PSC_SPICFG_DD_DISABLE;
+ cfg = PSC_SPICFG_CLR_LEN(cfg);
+ cfg |= PSC_SPICFG_SET_LEN(spi->bits_per_word);
+
+ cfg = PSC_SPICFG_CLR_BAUD(cfg);
+ cfg &= ~PSC_SPICFG_SET_DIV(3);
+ cfg |= au1550_spi_baudcfg(hw, spi->max_speed_hz);
+
+ hw->regs->psc_spicfg = cfg | PSC_SPICFG_DE_ENABLE;
+ wmb(); /* drain writebuffer */
+ do {
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+ } while ((stat & PSC_SPISTAT_DR) == 0);
+
+ if (hw->pdata->activate_cs)
+ hw->pdata->activate_cs(hw->pdata, spi->chip_select,
+ cspol);
+ break;
+ }
+}
+
+static int au1550_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ unsigned bpw, hz;
+ u32 cfg, stat;
+
+ bpw = spi->bits_per_word;
+ hz = spi->max_speed_hz;
+ if (t) {
+ if (t->bits_per_word)
+ bpw = t->bits_per_word;
+ if (t->speed_hz)
+ hz = t->speed_hz;
+ }
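+	/*
+	 * for illustration (values hypothetical): a transfer with
+	 * t->speed_hz = 500000 and t->bits_per_word = 0 runs at 500 kHz
+	 * with the device's default word size, for this transfer only;
+	 * zero fields fall back to the spi_device defaults taken above
+	 */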
+
+ if (!hz)
+ return -EINVAL;
+
+ au1550_spi_bits_handlers_set(hw, spi->bits_per_word);
+
+ cfg = hw->regs->psc_spicfg;
+ wmb(); /* drain writebuffer */
+ hw->regs->psc_spicfg = cfg & ~PSC_SPICFG_DE_ENABLE;
+ wmb(); /* drain writebuffer */
+
+ if (hw->usedma && bpw <= 8)
+ cfg &= ~PSC_SPICFG_DD_DISABLE;
+ else
+ cfg |= PSC_SPICFG_DD_DISABLE;
+ cfg = PSC_SPICFG_CLR_LEN(cfg);
+ cfg |= PSC_SPICFG_SET_LEN(bpw);
+
+ cfg = PSC_SPICFG_CLR_BAUD(cfg);
+ cfg &= ~PSC_SPICFG_SET_DIV(3);
+ cfg |= au1550_spi_baudcfg(hw, hz);
+
+ hw->regs->psc_spicfg = cfg;
+ wmb(); /* drain writebuffer */
+
+ if (cfg & PSC_SPICFG_DE_ENABLE) {
+ do {
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+ } while ((stat & PSC_SPISTAT_DR) == 0);
+ }
+
+ au1550_spi_reset_fifos(hw);
+ au1550_spi_mask_ack_all(hw);
+ return 0;
+}
+
+/*
+ * for dma spi transfers we have to set up the rx channel, otherwise there is
+ * no reliable way to recognize that the spi transfer is done
+ * dma complete callbacks are called before the real spi transfer is finished,
+ * and if only the tx dma channel is set up (and the rx fifo overflow event
+ * masked), the spi master done event irq is not generated unless the rx fifo
+ * has been emptied
+ * so we need a temporary rx buffer for rx dma if the user does not provide one
+ */
+static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size)
+{
+ hw->dma_rx_tmpbuf = kmalloc(size, GFP_KERNEL);
+ if (!hw->dma_rx_tmpbuf)
+ return -ENOMEM;
+ hw->dma_rx_tmpbuf_size = size;
+ hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
+ size, DMA_FROM_DEVICE);
+ if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
+ kfree(hw->dma_rx_tmpbuf);
+		hw->dma_rx_tmpbuf = NULL;
+ hw->dma_rx_tmpbuf_size = 0;
+ return -EFAULT;
+ }
+ return 0;
+}
+
+static void au1550_spi_dma_rxtmp_free(struct au1550_spi *hw)
+{
+ dma_unmap_single(hw->dev, hw->dma_rx_tmpbuf_addr,
+ hw->dma_rx_tmpbuf_size, DMA_FROM_DEVICE);
+ kfree(hw->dma_rx_tmpbuf);
+	hw->dma_rx_tmpbuf = NULL;
+ hw->dma_rx_tmpbuf_size = 0;
+}
+
+static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ dma_addr_t dma_tx_addr;
+ dma_addr_t dma_rx_addr;
+ u32 res;
+
+ hw->len = t->len;
+ hw->tx_count = 0;
+ hw->rx_count = 0;
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ dma_tx_addr = t->tx_dma;
+ dma_rx_addr = t->rx_dma;
+
+ /*
+ * check if buffers are already dma mapped, map them otherwise:
+ * - first map the TX buffer, so cache data gets written to memory
+ * - then map the RX buffer, so that cache entries (with
+ * soon-to-be-stale data) get removed
+ * use rx buffer in place of tx if tx buffer was not provided
+ * use temp rx buffer (preallocated or realloc to fit) for rx dma
+ */
+ if (t->tx_buf) {
+ if (t->tx_dma == 0) { /* if DMA_ADDR_INVALID, map it */
+ dma_tx_addr = dma_map_single(hw->dev,
+ (void *)t->tx_buf,
+ t->len, DMA_TO_DEVICE);
+ if (dma_mapping_error(hw->dev, dma_tx_addr))
+ dev_err(hw->dev, "tx dma map error\n");
+ }
+ }
+
+ if (t->rx_buf) {
+ if (t->rx_dma == 0) { /* if DMA_ADDR_INVALID, map it */
+ dma_rx_addr = dma_map_single(hw->dev,
+ (void *)t->rx_buf,
+ t->len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(hw->dev, dma_rx_addr))
+ dev_err(hw->dev, "rx dma map error\n");
+ }
+ } else {
+ if (t->len > hw->dma_rx_tmpbuf_size) {
+ int ret;
+
+ au1550_spi_dma_rxtmp_free(hw);
+ ret = au1550_spi_dma_rxtmp_alloc(hw, max(t->len,
+ AU1550_SPI_DMA_RXTMP_MINSIZE));
+ if (ret < 0)
+ return ret;
+ }
+ hw->rx = hw->dma_rx_tmpbuf;
+ dma_rx_addr = hw->dma_rx_tmpbuf_addr;
+ dma_sync_single_for_device(hw->dev, dma_rx_addr,
+ t->len, DMA_FROM_DEVICE);
+ }
+
+ if (!t->tx_buf) {
+ dma_sync_single_for_device(hw->dev, dma_rx_addr,
+ t->len, DMA_BIDIRECTIONAL);
+ hw->tx = hw->rx;
+ }
+
+ /* put buffers on the ring */
+ res = au1xxx_dbdma_put_dest(hw->dma_rx_ch, virt_to_phys(hw->rx),
+ t->len, DDMA_FLAGS_IE);
+ if (!res)
+ dev_err(hw->dev, "rx dma put dest error\n");
+
+ res = au1xxx_dbdma_put_source(hw->dma_tx_ch, virt_to_phys(hw->tx),
+ t->len, DDMA_FLAGS_IE);
+ if (!res)
+ dev_err(hw->dev, "tx dma put source error\n");
+
+ au1xxx_dbdma_start(hw->dma_rx_ch);
+ au1xxx_dbdma_start(hw->dma_tx_ch);
+
+	/* enable nearly all event interrupts (only the SD event stays masked) */
+ hw->regs->psc_spimsk = PSC_SPIMSK_SD;
+ wmb(); /* drain writebuffer */
+
+ /* start the transfer */
+ hw->regs->psc_spipcr = PSC_SPIPCR_MS;
+ wmb(); /* drain writebuffer */
+
+ wait_for_completion(&hw->master_done);
+
+ au1xxx_dbdma_stop(hw->dma_tx_ch);
+ au1xxx_dbdma_stop(hw->dma_rx_ch);
+
+ if (!t->rx_buf) {
+		/* using the temporary preallocated and premapped buffer */
+ dma_sync_single_for_cpu(hw->dev, dma_rx_addr, t->len,
+ DMA_FROM_DEVICE);
+ }
+ /* unmap buffers if mapped above */
+	if (t->rx_buf && t->rx_dma == 0)
+ dma_unmap_single(hw->dev, dma_rx_addr, t->len,
+ DMA_FROM_DEVICE);
+	if (t->tx_buf && t->tx_dma == 0)
+ dma_unmap_single(hw->dev, dma_tx_addr, t->len,
+ DMA_TO_DEVICE);
+
+ return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count;
+}
+
+static irqreturn_t au1550_spi_dma_irq_callback(struct au1550_spi *hw)
+{
+ u32 stat, evnt;
+
+ stat = hw->regs->psc_spistat;
+ evnt = hw->regs->psc_spievent;
+ wmb(); /* drain writebuffer */
+ if ((stat & PSC_SPISTAT_DI) == 0) {
+ dev_err(hw->dev, "Unexpected IRQ!\n");
+ return IRQ_NONE;
+ }
+
+ if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
+ | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
+ | PSC_SPIEVNT_TU | PSC_SPIEVNT_SD))
+ != 0) {
+		/*
+		 * due to an SPI error we consider the transfer done,
+		 * so mask all events until the next transfer starts
+		 * and stop any possibly running dma immediately
+		 */
+ au1550_spi_mask_ack_all(hw);
+ au1xxx_dbdma_stop(hw->dma_rx_ch);
+ au1xxx_dbdma_stop(hw->dma_tx_ch);
+
+ /* get number of transferred bytes */
+ hw->rx_count = hw->len - au1xxx_get_dma_residue(hw->dma_rx_ch);
+ hw->tx_count = hw->len - au1xxx_get_dma_residue(hw->dma_tx_ch);
+
+ au1xxx_dbdma_reset(hw->dma_rx_ch);
+ au1xxx_dbdma_reset(hw->dma_tx_ch);
+ au1550_spi_reset_fifos(hw);
+
+ if (evnt == PSC_SPIEVNT_RO)
+ dev_err(hw->dev,
+ "dma transfer: receive FIFO overflow!\n");
+ else
+ dev_err(hw->dev,
+ "dma transfer: unexpected SPI error "
+ "(event=0x%x stat=0x%x)!\n", evnt, stat);
+
+ complete(&hw->master_done);
+ return IRQ_HANDLED;
+ }
+
+ if ((evnt & PSC_SPIEVNT_MD) != 0) {
+ /* transfer completed successfully */
+ au1550_spi_mask_ack_all(hw);
+ hw->rx_count = hw->len;
+ hw->tx_count = hw->len;
+ complete(&hw->master_done);
+ }
+ return IRQ_HANDLED;
+}
+
+
+/* routines to handle different word sizes in pio mode */
+#define AU1550_SPI_RX_WORD(size, mask) \
+static void au1550_spi_rx_word_##size(struct au1550_spi *hw) \
+{ \
+ u32 fifoword = hw->regs->psc_spitxrx & (u32)(mask); \
+ wmb(); /* drain writebuffer */ \
+ if (hw->rx) { \
+ *(u##size *)hw->rx = (u##size)fifoword; \
+ hw->rx += (size) / 8; \
+ } \
+ hw->rx_count += (size) / 8; \
+}
+
+#define AU1550_SPI_TX_WORD(size, mask) \
+static void au1550_spi_tx_word_##size(struct au1550_spi *hw) \
+{ \
+ u32 fifoword = 0; \
+ if (hw->tx) { \
+ fifoword = *(u##size *)hw->tx & (u32)(mask); \
+ hw->tx += (size) / 8; \
+ } \
+ hw->tx_count += (size) / 8; \
+ if (hw->tx_count >= hw->len) \
+ fifoword |= PSC_SPITXRX_LC; \
+ hw->regs->psc_spitxrx = fifoword; \
+ wmb(); /* drain writebuffer */ \
+}
+
+AU1550_SPI_RX_WORD(8, 0xff)
+AU1550_SPI_RX_WORD(16, 0xffff)
+AU1550_SPI_RX_WORD(32, 0xffffff)
+AU1550_SPI_TX_WORD(8, 0xff)
+AU1550_SPI_TX_WORD(16, 0xffff)
+AU1550_SPI_TX_WORD(32, 0xffffff)
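+/*
+ * for illustration: AU1550_SPI_TX_WORD(8, 0xff) expands to
+ * au1550_spi_tx_word_8(), which fetches one byte from hw->tx, masks it
+ * with 0xff and tags the last word of the transfer with PSC_SPITXRX_LC;
+ * the 32-bit variants mask with 0xffffff since the PSC handles at most
+ * 24-bit words
+ */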
+
+static int au1550_spi_pio_txrxb(struct spi_device *spi, struct spi_transfer *t)
+{
+ u32 stat, mask;
+ struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->tx_count = 0;
+ hw->rx_count = 0;
+
+	/* by default enable nearly all event interrupts after filling the tx fifo */
+ mask = PSC_SPIMSK_SD;
+
+ /* fill the transmit FIFO */
+ while (hw->tx_count < hw->len) {
+
+ hw->tx_word(hw);
+
+ if (hw->tx_count >= hw->len) {
+ /* mask tx fifo request interrupt as we are done */
+ mask |= PSC_SPIMSK_TR;
+ }
+
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+ if (stat & PSC_SPISTAT_TF)
+ break;
+ }
+
+ /* enable event interrupts */
+ hw->regs->psc_spimsk = mask;
+ wmb(); /* drain writebuffer */
+
+ /* start the transfer */
+ hw->regs->psc_spipcr = PSC_SPIPCR_MS;
+ wmb(); /* drain writebuffer */
+
+ wait_for_completion(&hw->master_done);
+
+ return hw->rx_count < hw->tx_count ? hw->rx_count : hw->tx_count;
+}
+
+static irqreturn_t au1550_spi_pio_irq_callback(struct au1550_spi *hw)
+{
+ int busy;
+ u32 stat, evnt;
+
+ stat = hw->regs->psc_spistat;
+ evnt = hw->regs->psc_spievent;
+ wmb(); /* drain writebuffer */
+ if ((stat & PSC_SPISTAT_DI) == 0) {
+ dev_err(hw->dev, "Unexpected IRQ!\n");
+ return IRQ_NONE;
+ }
+
+ if ((evnt & (PSC_SPIEVNT_MM | PSC_SPIEVNT_RO
+ | PSC_SPIEVNT_RU | PSC_SPIEVNT_TO
+ | PSC_SPIEVNT_SD))
+ != 0) {
+		/*
+		 * due to an error we consider the transfer done,
+		 * so mask all events until the next transfer starts
+		 */
+ au1550_spi_mask_ack_all(hw);
+ au1550_spi_reset_fifos(hw);
+ dev_err(hw->dev,
+ "pio transfer: unexpected SPI error "
+ "(event=0x%x stat=0x%x)!\n", evnt, stat);
+ complete(&hw->master_done);
+ return IRQ_HANDLED;
+ }
+
+	/*
+	 * while there is something to read from the rx fifo
+	 * or there is space to write to the tx fifo:
+	 */
+ do {
+ busy = 0;
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+
+ /*
+ * Take care to not let the Rx FIFO overflow.
+ *
+ * We only write a byte if we have read one at least. Initially,
+ * the write fifo is full, so we should read from the read fifo
+ * first.
+ * In case we miss a word from the read fifo, we should get a
+ * RO event and should back out.
+ */
+ if (!(stat & PSC_SPISTAT_RE) && hw->rx_count < hw->len) {
+ hw->rx_word(hw);
+ busy = 1;
+
+ if (!(stat & PSC_SPISTAT_TF) && hw->tx_count < hw->len)
+ hw->tx_word(hw);
+ }
+ } while (busy);
+
+ hw->regs->psc_spievent = PSC_SPIEVNT_RR | PSC_SPIEVNT_TR;
+ wmb(); /* drain writebuffer */
+
+ /*
+ * Restart the SPI transmission in case of a transmit underflow.
+ * This seems to work despite the notes in the Au1550 data book
+ * of Figure 8-4 with flowchart for SPI master operation:
+ *
+ * """Note 1: An XFR Error Interrupt occurs, unless masked,
+ * for any of the following events: Tx FIFO Underflow,
+ * Rx FIFO Overflow, or Multiple-master Error
+ * Note 2: In case of a Tx Underflow Error, all zeroes are
+ * transmitted."""
+ *
+ * By simply restarting the spi transfer on Tx Underflow Error,
+	 * we assume that the spi transfer was paused instead of zeroes
+	 * being transmitted, as mentioned in Note 2 of the Au1550 data book.
+ */
+ if (evnt & PSC_SPIEVNT_TU) {
+ hw->regs->psc_spievent = PSC_SPIEVNT_TU | PSC_SPIEVNT_MD;
+ wmb(); /* drain writebuffer */
+ hw->regs->psc_spipcr = PSC_SPIPCR_MS;
+ wmb(); /* drain writebuffer */
+ }
+
+ if (hw->rx_count >= hw->len) {
+ /* transfer completed successfully */
+ au1550_spi_mask_ack_all(hw);
+ complete(&hw->master_done);
+ }
+ return IRQ_HANDLED;
+}
+
+static int au1550_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct au1550_spi *hw = spi_master_get_devdata(spi->master);
+ return hw->txrx_bufs(spi, t);
+}
+
+static irqreturn_t au1550_spi_irq(int irq, void *dev)
+{
+ struct au1550_spi *hw = dev;
+ return hw->irq_callback(hw);
+}
+
+static void au1550_spi_bits_handlers_set(struct au1550_spi *hw, int bpw)
+{
+ if (bpw <= 8) {
+ if (hw->usedma) {
+ hw->txrx_bufs = &au1550_spi_dma_txrxb;
+ hw->irq_callback = &au1550_spi_dma_irq_callback;
+ } else {
+ hw->rx_word = &au1550_spi_rx_word_8;
+ hw->tx_word = &au1550_spi_tx_word_8;
+ hw->txrx_bufs = &au1550_spi_pio_txrxb;
+ hw->irq_callback = &au1550_spi_pio_irq_callback;
+ }
+ } else if (bpw <= 16) {
+ hw->rx_word = &au1550_spi_rx_word_16;
+ hw->tx_word = &au1550_spi_tx_word_16;
+ hw->txrx_bufs = &au1550_spi_pio_txrxb;
+ hw->irq_callback = &au1550_spi_pio_irq_callback;
+ } else {
+ hw->rx_word = &au1550_spi_rx_word_32;
+ hw->tx_word = &au1550_spi_tx_word_32;
+ hw->txrx_bufs = &au1550_spi_pio_txrxb;
+ hw->irq_callback = &au1550_spi_pio_irq_callback;
+ }
+}
+
+static void au1550_spi_setup_psc_as_spi(struct au1550_spi *hw)
+{
+ u32 stat, cfg;
+
+ /* set up the PSC for SPI mode */
+ hw->regs->psc_ctrl = PSC_CTRL_DISABLE;
+ wmb(); /* drain writebuffer */
+ hw->regs->psc_sel = PSC_SEL_PS_SPIMODE;
+ wmb(); /* drain writebuffer */
+
+ hw->regs->psc_spicfg = 0;
+ wmb(); /* drain writebuffer */
+
+ hw->regs->psc_ctrl = PSC_CTRL_ENABLE;
+ wmb(); /* drain writebuffer */
+
+ do {
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+ } while ((stat & PSC_SPISTAT_SR) == 0);
+
+
+ cfg = hw->usedma ? 0 : PSC_SPICFG_DD_DISABLE;
+ cfg |= PSC_SPICFG_SET_LEN(8);
+ cfg |= PSC_SPICFG_RT_FIFO8 | PSC_SPICFG_TT_FIFO8;
+ /* use minimal allowed brg and div values as initial setting: */
+ cfg |= PSC_SPICFG_SET_BAUD(4) | PSC_SPICFG_SET_DIV(0);
+
+#ifdef AU1550_SPI_DEBUG_LOOPBACK
+ cfg |= PSC_SPICFG_LB;
+#endif
+
+ hw->regs->psc_spicfg = cfg;
+ wmb(); /* drain writebuffer */
+
+ au1550_spi_mask_ack_all(hw);
+
+ hw->regs->psc_spicfg |= PSC_SPICFG_DE_ENABLE;
+ wmb(); /* drain writebuffer */
+
+ do {
+ stat = hw->regs->psc_spistat;
+ wmb(); /* drain writebuffer */
+ } while ((stat & PSC_SPISTAT_DR) == 0);
+
+ au1550_spi_reset_fifos(hw);
+}
+
+
+static int au1550_spi_probe(struct platform_device *pdev)
+{
+ struct au1550_spi *hw;
+ struct spi_master *master;
+ struct resource *r;
+ int err = 0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct au1550_spi));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "No memory for spi_master\n");
+ err = -ENOMEM;
+ goto err_nomem;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 24);
+
+ hw = spi_master_get_devdata(master);
+
+ hw->master = master;
+ hw->pdata = dev_get_platdata(&pdev->dev);
+ hw->dev = &pdev->dev;
+
+ if (hw->pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ err = -ENOENT;
+ goto err_no_pdata;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no IRQ\n");
+ err = -ENODEV;
+ goto err_no_iores;
+ }
+ hw->irq = r->start;
+
+ hw->usedma = 0;
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r) {
+ hw->dma_tx_id = r->start;
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r) {
+ hw->dma_rx_id = r->start;
+ if (usedma && ddma_memid) {
+ if (pdev->dev.dma_mask == NULL)
+ dev_warn(&pdev->dev, "no dma mask\n");
+ else
+ hw->usedma = 1;
+ }
+ }
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "no mmio resource\n");
+ err = -ENODEV;
+ goto err_no_iores;
+ }
+
+ hw->ioarea = request_mem_region(r->start, sizeof(psc_spi_t),
+ pdev->name);
+ if (!hw->ioarea) {
+ dev_err(&pdev->dev, "Cannot reserve iomem region\n");
+ err = -ENXIO;
+ goto err_no_iores;
+ }
+
+ hw->regs = (psc_spi_t __iomem *)ioremap(r->start, sizeof(psc_spi_t));
+ if (!hw->regs) {
+ dev_err(&pdev->dev, "cannot ioremap\n");
+ err = -ENXIO;
+ goto err_ioremap;
+ }
+
+ platform_set_drvdata(pdev, hw);
+
+ init_completion(&hw->master_done);
+
+ hw->bitbang.master = hw->master;
+ hw->bitbang.setup_transfer = au1550_spi_setupxfer;
+ hw->bitbang.chipselect = au1550_spi_chipsel;
+ hw->bitbang.txrx_bufs = au1550_spi_txrx_bufs;
+
+ if (hw->usedma) {
+ hw->dma_tx_ch = au1xxx_dbdma_chan_alloc(ddma_memid,
+ hw->dma_tx_id, NULL, (void *)hw);
+ if (hw->dma_tx_ch == 0) {
+ dev_err(&pdev->dev,
+ "Cannot allocate tx dma channel\n");
+ err = -ENXIO;
+ goto err_no_txdma;
+ }
+ au1xxx_dbdma_set_devwidth(hw->dma_tx_ch, 8);
+ if (au1xxx_dbdma_ring_alloc(hw->dma_tx_ch,
+ AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
+ dev_err(&pdev->dev,
+ "Cannot allocate tx dma descriptors\n");
+ err = -ENXIO;
+ goto err_no_txdma_descr;
+ }
+
+
+ hw->dma_rx_ch = au1xxx_dbdma_chan_alloc(hw->dma_rx_id,
+ ddma_memid, NULL, (void *)hw);
+ if (hw->dma_rx_ch == 0) {
+ dev_err(&pdev->dev,
+ "Cannot allocate rx dma channel\n");
+ err = -ENXIO;
+ goto err_no_rxdma;
+ }
+ au1xxx_dbdma_set_devwidth(hw->dma_rx_ch, 8);
+ if (au1xxx_dbdma_ring_alloc(hw->dma_rx_ch,
+ AU1550_SPI_DBDMA_DESCRIPTORS) == 0) {
+ dev_err(&pdev->dev,
+ "Cannot allocate rx dma descriptors\n");
+ err = -ENXIO;
+ goto err_no_rxdma_descr;
+ }
+
+ err = au1550_spi_dma_rxtmp_alloc(hw,
+ AU1550_SPI_DMA_RXTMP_MINSIZE);
+ if (err < 0) {
+ dev_err(&pdev->dev,
+ "Cannot allocate initial rx dma tmp buffer\n");
+ goto err_dma_rxtmp_alloc;
+ }
+ }
+
+ au1550_spi_bits_handlers_set(hw, 8);
+
+ err = request_irq(hw->irq, au1550_spi_irq, 0, pdev->name, hw);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ goto err_no_irq;
+ }
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = hw->pdata->num_chipselect;
+
+ /*
+ * precompute valid range for spi freq - from au1550 datasheet:
+ * psc_tempclk = psc_mainclk / (2 << DIV)
+ * spiclk = psc_tempclk / (2 * (BRG + 1))
+ * BRG valid range is 4..63
+ * DIV valid range is 0..3
+ * round the min and max frequencies to values that would still
+ * produce valid brg and div
+ */
+ {
+ int min_div = (2 << 0) * (2 * (4 + 1));
+ int max_div = (2 << 3) * (2 * (63 + 1));
+ master->max_speed_hz = hw->pdata->mainclk_hz / min_div;
+ master->min_speed_hz =
+ hw->pdata->mainclk_hz / (max_div + 1) + 1;
+ }
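+	/*
+	 * e.g. with a hypothetical 48 MHz mainclk this yields
+	 * max_speed_hz = 48000000 / 20 = 2400000 Hz and
+	 * min_speed_hz = 48000000 / 2049 + 1 = 23427 Hz
+	 */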
+
+ au1550_spi_setup_psc_as_spi(hw);
+
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register SPI master\n");
+ goto err_register;
+ }
+
+ dev_info(&pdev->dev,
+ "spi master registered: bus_num=%d num_chipselect=%d\n",
+ master->bus_num, master->num_chipselect);
+
+ return 0;
+
+err_register:
+ free_irq(hw->irq, hw);
+
+err_no_irq:
+ au1550_spi_dma_rxtmp_free(hw);
+
+err_dma_rxtmp_alloc:
+err_no_rxdma_descr:
+ if (hw->usedma)
+ au1xxx_dbdma_chan_free(hw->dma_rx_ch);
+
+err_no_rxdma:
+err_no_txdma_descr:
+ if (hw->usedma)
+ au1xxx_dbdma_chan_free(hw->dma_tx_ch);
+
+err_no_txdma:
+ iounmap((void __iomem *)hw->regs);
+
+err_ioremap:
+ release_mem_region(r->start, sizeof(psc_spi_t));
+
+err_no_iores:
+err_no_pdata:
+ spi_master_put(hw->master);
+
+err_nomem:
+ return err;
+}
+
+static int au1550_spi_remove(struct platform_device *pdev)
+{
+ struct au1550_spi *hw = platform_get_drvdata(pdev);
+
+ dev_info(&pdev->dev, "spi master remove: bus_num=%d\n",
+ hw->master->bus_num);
+
+ spi_bitbang_stop(&hw->bitbang);
+ free_irq(hw->irq, hw);
+ iounmap((void __iomem *)hw->regs);
+ release_mem_region(hw->ioarea->start, sizeof(psc_spi_t));
+
+ if (hw->usedma) {
+ au1550_spi_dma_rxtmp_free(hw);
+ au1xxx_dbdma_chan_free(hw->dma_rx_ch);
+ au1xxx_dbdma_chan_free(hw->dma_tx_ch);
+ }
+
+ spi_master_put(hw->master);
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:au1550-spi");
+
+static struct platform_driver au1550_spi_drv = {
+ .probe = au1550_spi_probe,
+ .remove = au1550_spi_remove,
+ .driver = {
+ .name = "au1550-spi",
+ },
+};
+
+static int __init au1550_spi_init(void)
+{
+ /*
+ * create memory device with 8 bits dev_devwidth
+ * needed for proper byte ordering to spi fifo
+ */
+ switch (alchemy_get_cputype()) {
+ case ALCHEMY_CPU_AU1550:
+ case ALCHEMY_CPU_AU1200:
+ case ALCHEMY_CPU_AU1300:
+ break;
+ default:
+ return -ENODEV;
+ }
+
+ if (usedma) {
+ ddma_memid = au1xxx_ddma_add_device(&au1550_spi_mem_dbdev);
+ if (!ddma_memid)
+			printk(KERN_ERR "au1550-spi: cannot add memory"
+					" dbdma device\n");
+ }
+ return platform_driver_register(&au1550_spi_drv);
+}
+module_init(au1550_spi_init);
+
+static void __exit au1550_spi_exit(void)
+{
+ if (usedma && ddma_memid)
+ au1xxx_ddma_del_device(ddma_memid);
+ platform_driver_unregister(&au1550_spi_drv);
+}
+module_exit(au1550_spi_exit);
+
+MODULE_DESCRIPTION("Au1550 PSC SPI Driver");
+MODULE_AUTHOR("Jan Nikitenko <jan.nikitenko@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c
new file mode 100644
index 000000000..37875cf94
--- /dev/null
+++ b/drivers/spi/spi-bcm2835.c
@@ -0,0 +1,517 @@
+/*
+ * Driver for Broadcom BCM2835 SPI Controllers
+ *
+ * Copyright (C) 2012 Chris Boot
+ * Copyright (C) 2013 Stephen Warren
+ * Copyright (C) 2015 Martin Sperl
+ *
+ * This driver is inspired by:
+ * spi-ath79.c, Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
+ * spi-atmel.c, Copyright (C) 2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/spi/spi.h>
+
+/* SPI register offsets */
+#define BCM2835_SPI_CS 0x00
+#define BCM2835_SPI_FIFO 0x04
+#define BCM2835_SPI_CLK 0x08
+#define BCM2835_SPI_DLEN 0x0c
+#define BCM2835_SPI_LTOH 0x10
+#define BCM2835_SPI_DC 0x14
+
+/* Bitfields in CS */
+#define BCM2835_SPI_CS_LEN_LONG 0x02000000
+#define BCM2835_SPI_CS_DMA_LEN 0x01000000
+#define BCM2835_SPI_CS_CSPOL2 0x00800000
+#define BCM2835_SPI_CS_CSPOL1 0x00400000
+#define BCM2835_SPI_CS_CSPOL0 0x00200000
+#define BCM2835_SPI_CS_RXF 0x00100000
+#define BCM2835_SPI_CS_RXR 0x00080000
+#define BCM2835_SPI_CS_TXD 0x00040000
+#define BCM2835_SPI_CS_RXD 0x00020000
+#define BCM2835_SPI_CS_DONE 0x00010000
+#define BCM2835_SPI_CS_LEN 0x00002000
+#define BCM2835_SPI_CS_REN 0x00001000
+#define BCM2835_SPI_CS_ADCS 0x00000800
+#define BCM2835_SPI_CS_INTR 0x00000400
+#define BCM2835_SPI_CS_INTD 0x00000200
+#define BCM2835_SPI_CS_DMAEN 0x00000100
+#define BCM2835_SPI_CS_TA 0x00000080
+#define BCM2835_SPI_CS_CSPOL 0x00000040
+#define BCM2835_SPI_CS_CLEAR_RX 0x00000020
+#define BCM2835_SPI_CS_CLEAR_TX 0x00000010
+#define BCM2835_SPI_CS_CPOL 0x00000008
+#define BCM2835_SPI_CS_CPHA 0x00000004
+#define BCM2835_SPI_CS_CS_10 0x00000002
+#define BCM2835_SPI_CS_CS_01 0x00000001
+
+#define BCM2835_SPI_POLLING_LIMIT_US 30
+#define BCM2835_SPI_TIMEOUT_MS 30000
+#define BCM2835_SPI_MODE_BITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
+ | SPI_NO_CS | SPI_3WIRE)
+
+#define DRV_NAME "spi-bcm2835"
+
+struct bcm2835_spi {
+ void __iomem *regs;
+ struct clk *clk;
+ int irq;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int tx_len;
+ int rx_len;
+};
+
+static inline u32 bcm2835_rd(struct bcm2835_spi *bs, unsigned reg)
+{
+ return readl(bs->regs + reg);
+}
+
+static inline void bcm2835_wr(struct bcm2835_spi *bs, unsigned reg, u32 val)
+{
+ writel(val, bs->regs + reg);
+}
+
+static inline void bcm2835_rd_fifo(struct bcm2835_spi *bs)
+{
+ u8 byte;
+
+ while ((bs->rx_len) &&
+ (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
+ byte = bcm2835_rd(bs, BCM2835_SPI_FIFO);
+ if (bs->rx_buf)
+ *bs->rx_buf++ = byte;
+ bs->rx_len--;
+ }
+}
+
+static inline void bcm2835_wr_fifo(struct bcm2835_spi *bs)
+{
+ u8 byte;
+
+ while ((bs->tx_len) &&
+ (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
+ byte = bs->tx_buf ? *bs->tx_buf++ : 0;
+ bcm2835_wr(bs, BCM2835_SPI_FIFO, byte);
+ bs->tx_len--;
+ }
+}
+
+static void bcm2835_spi_reset_hw(struct spi_master *master)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+ /* Disable SPI interrupts and transfer */
+ cs &= ~(BCM2835_SPI_CS_INTR |
+ BCM2835_SPI_CS_INTD |
+ BCM2835_SPI_CS_TA);
+ /* and reset RX/TX FIFOS */
+ cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
+
+ /* and reset the SPI_HW */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+}
+
+static irqreturn_t bcm2835_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+
+ /* Read as many bytes as possible from FIFO */
+ bcm2835_rd_fifo(bs);
+ /* Write as many bytes as possible to FIFO */
+ bcm2835_wr_fifo(bs);
+
+ /* based on flags decide if we can finish the transfer */
+ if (bcm2835_rd(bs, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
+ /* Transfer complete - reset SPI HW */
+ bcm2835_spi_reset_hw(master);
+ /* wake up the framework */
+ complete(&master->xfer_completion);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr,
+ u32 cs,
+ unsigned long xfer_time_us)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ /* set timeout to 1 second of maximum polling */
+ unsigned long timeout = jiffies + HZ;
+
+ /* enable HW block without interrupts */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
+
+ /* loop until finished the transfer */
+ while (bs->rx_len) {
+ /* read from fifo as much as possible */
+ bcm2835_rd_fifo(bs);
+ /* fill in tx fifo as much as possible */
+ bcm2835_wr_fifo(bs);
+ /* if we still expect some data after the read,
+ * check for a possible timeout
+ */
+ if (bs->rx_len && time_after(jiffies, timeout)) {
+ /* Transfer complete - reset SPI HW */
+ bcm2835_spi_reset_hw(master);
+ /* and return timeout */
+ return -ETIMEDOUT;
+ }
+ }
+
+ /* Transfer complete - reset SPI HW */
+ bcm2835_spi_reset_hw(master);
+ /* and return without waiting for completion */
+ return 0;
+}
+
+static int bcm2835_spi_transfer_one_irq(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr,
+ u32 cs)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+
+	/* fill in the fifo if we have a gpio-cs
+	 * note that there have been rare events where the native CS
+	 * flapped for <1us, which may change the behaviour
+	 * with gpio-cs this does not happen, so it is implemented
+	 * only for this case
+	 */
+ if (gpio_is_valid(spi->cs_gpio)) {
+		/* enable the HW block, but without enabling interrupts,
+		 * as that would trigger an immediate interrupt
+		 */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ cs | BCM2835_SPI_CS_TA);
+ /* fill in tx fifo as much as possible */
+ bcm2835_wr_fifo(bs);
+ }
+
+ /*
+ * Enable the HW block. This will immediately trigger a DONE (TX
+ * empty) interrupt, upon which we will fill the TX FIFO with the
+ * first TX bytes. Pre-filling the TX FIFO here to avoid the
+ * interrupt doesn't work:-(
+ */
+ cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD | BCM2835_SPI_CS_TA;
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+
+ /* signal that we need to wait for completion */
+ return 1;
+}
+
+static int bcm2835_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ unsigned long spi_hz, clk_hz, cdiv;
+ unsigned long spi_used_hz, xfer_time_us;
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+
+ /* set clock */
+ spi_hz = tfr->speed_hz;
+ clk_hz = clk_get_rate(bs->clk);
+
+ if (spi_hz >= clk_hz / 2) {
+ cdiv = 2; /* clk_hz/2 is the fastest we can go */
+ } else if (spi_hz) {
+ /* CDIV must be a multiple of two */
+ cdiv = DIV_ROUND_UP(clk_hz, spi_hz);
+ cdiv += (cdiv % 2);
+
+ if (cdiv >= 65536)
+ cdiv = 0; /* 0 is the slowest we can go */
+ } else {
+ cdiv = 0; /* 0 is the slowest we can go */
+ }
+ spi_used_hz = cdiv ? (clk_hz / cdiv) : (clk_hz / 65536);
+ bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
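+	/*
+	 * worked example (clock rate hypothetical): with clk_hz = 250 MHz
+	 * and spi_hz = 10 MHz, cdiv = DIV_ROUND_UP(250000000, 10000000) = 25,
+	 * rounded up to the even value 26, so the transfer actually runs at
+	 * 250000000 / 26 ~= 9.6 MHz - always at or below the requested rate
+	 */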
+
+ /* handle all the modes */
+ if ((spi->mode & SPI_3WIRE) && (tfr->rx_buf))
+ cs |= BCM2835_SPI_CS_REN;
+ if (spi->mode & SPI_CPOL)
+ cs |= BCM2835_SPI_CS_CPOL;
+ if (spi->mode & SPI_CPHA)
+ cs |= BCM2835_SPI_CS_CPHA;
+
+	/* for gpio_cs set a dummy CS so that no HW-CS gets changed
+	 * we cannot do this in bcm2835_spi_set_cs, as it does not
+	 * get called for cs_gpio cases, so we need to do it here
+	 */
+ if (gpio_is_valid(spi->cs_gpio) || (spi->mode & SPI_NO_CS))
+ cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+
+ /* set transmit buffers and length */
+ bs->tx_buf = tfr->tx_buf;
+ bs->rx_buf = tfr->rx_buf;
+ bs->tx_len = tfr->len;
+ bs->rx_len = tfr->len;
+
+ /* calculate the estimated time in us the transfer runs */
+ xfer_time_us = tfr->len
+ * 9 /* clocks/byte - SPI-HW waits 1 clock after each byte */
+ * 1000000 / spi_used_hz;
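+	/*
+	 * e.g. at the hypothetical ~9.6 MHz rate from the example above,
+	 * a 4 byte transfer is estimated at ~3 us and gets polled, while
+	 * a 100 byte transfer (~93 us) exceeds
+	 * BCM2835_SPI_POLLING_LIMIT_US and takes the interrupt path below
+	 */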
+
+	/* for short requests run polling */
+ if (xfer_time_us <= BCM2835_SPI_POLLING_LIMIT_US)
+ return bcm2835_spi_transfer_one_poll(master, spi, tfr,
+ cs, xfer_time_us);
+
+ return bcm2835_spi_transfer_one_irq(master, spi, tfr, cs);
+}
+
+static void bcm2835_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bcm2835_spi_reset_hw(master);
+}
+
+static void bcm2835_spi_set_cs(struct spi_device *spi, bool gpio_level)
+{
+	/*
+	 * we can assume that we are "native" as per spi_set_cs
+	 * calling us ONLY when cs_gpio is not set
+	 * we can also assume that we are CS < 3 as per bcm2835_spi_setup;
+	 * we would not get called otherwise because of the error handling
+	 * there. the level passed is the electrical level, not
+	 * enabled/disabled, so it has to get translated back to
+	 * enable/disable - see spi_set_cs in spi.c for the implementation
+	 */
+
+ struct spi_master *master = spi->master;
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+ u32 cs = bcm2835_rd(bs, BCM2835_SPI_CS);
+ bool enable;
+
+ /* calculate the enable flag from the passed gpio_level */
+ enable = (spi->mode & SPI_CS_HIGH) ? gpio_level : !gpio_level;
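+	/*
+	 * i.e. enable is gpio_level for SPI_CS_HIGH devices and its
+	 * inverse for the default active-low case: level 0 asserts an
+	 * active-low chip select, level 1 an active-high one
+	 */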
+
+ /* set flags for "reverse" polarity in the registers */
+ if (spi->mode & SPI_CS_HIGH) {
+ /* set the correct CS-bits */
+ cs |= BCM2835_SPI_CS_CSPOL;
+ cs |= BCM2835_SPI_CS_CSPOL0 << spi->chip_select;
+ } else {
+ /* clean the CS-bits */
+ cs &= ~BCM2835_SPI_CS_CSPOL;
+ cs &= ~(BCM2835_SPI_CS_CSPOL0 << spi->chip_select);
+ }
+
+ /* select the correct chip_select depending on disabled/enabled */
+ if (enable) {
+ /* set cs correctly */
+ if (spi->mode & SPI_NO_CS) {
+ /* use the "undefined" chip-select */
+ cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+ } else {
+ /* set the chip select */
+ cs &= ~(BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01);
+ cs |= spi->chip_select;
+ }
+ } else {
+ /* disable CSPOL which puts HW-CS into deselected state */
+ cs &= ~BCM2835_SPI_CS_CSPOL;
+ /* use the "undefined" chip-select as precaution */
+ cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+ }
+
+ /* finally set the calculated flags in SPI_CS */
+ bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+}
+
+static int chip_match_name(struct gpio_chip *chip, void *data)
+{
+ return !strcmp(chip->label, data);
+}
+
+static int bcm2835_spi_setup(struct spi_device *spi)
+{
+ int err;
+ struct gpio_chip *chip;
+ /*
+ * sanity checking the native-chipselects
+ */
+ if (spi->mode & SPI_NO_CS)
+ return 0;
+ if (gpio_is_valid(spi->cs_gpio))
+ return 0;
+ if (spi->chip_select > 1) {
+		/* error out if a native CS > 1 is requested:
+		 * officially there is a CS2, but it is not documented
+		 * which GPIO is connected with that...
+		 */
+ dev_err(&spi->dev,
+ "setup: only two native chip-selects are supported\n");
+ return -EINVAL;
+ }
+ /* now translate native cs to GPIO */
+
+ /* get the gpio chip for the base */
+ chip = gpiochip_find("pinctrl-bcm2835", chip_match_name);
+ if (!chip)
+ return 0;
+
+ /* and calculate the real CS */
+ spi->cs_gpio = chip->base + 8 - spi->chip_select;
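+	/*
+	 * native CS0 maps to chip->base + 8 and CS1 to chip->base + 7;
+	 * with the usual chip->base of 0 (an assumption) these are the
+	 * familiar GPIO 8 (CE0) and GPIO 7 (CE1) pins
+	 */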
+
+ /* and set up the "mode" and level */
+ dev_info(&spi->dev, "setting up native-CS%i as GPIO %i\n",
+ spi->chip_select, spi->cs_gpio);
+
+ /* set up GPIO as output and pull to the correct level */
+ err = gpio_direction_output(spi->cs_gpio,
+ (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+ if (err) {
+ dev_err(&spi->dev,
+ "could not set CS%i gpio %i as output: %i",
+ spi->chip_select, spi->cs_gpio, err);
+ return err;
+ }
+ /* the implementation of pinctrl-bcm2835 currently does not
+ * set the GPIO value when using gpio_direction_output
+ * so we are setting it here explicitly
+ */
+ gpio_set_value(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+
+ return 0;
+}
+
+static int bcm2835_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcm2835_spi *bs;
+ struct resource *res;
+ int err;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ dev_err(&pdev->dev, "spi_alloc_master() failed\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+
+ master->mode_bits = BCM2835_SPI_MODE_BITS;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->num_chipselect = 3;
+ master->setup = bcm2835_spi_setup;
+ master->set_cs = bcm2835_spi_set_cs;
+ master->transfer_one = bcm2835_spi_transfer_one;
+ master->handle_err = bcm2835_spi_handle_err;
+ master->dev.of_node = pdev->dev.of_node;
+
+ bs = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bs->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(bs->regs)) {
+ err = PTR_ERR(bs->regs);
+ goto out_master_put;
+ }
+
+ bs->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(bs->clk)) {
+ err = PTR_ERR(bs->clk);
+ dev_err(&pdev->dev, "could not get clk: %d\n", err);
+ goto out_master_put;
+ }
+
+ bs->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (bs->irq <= 0) {
+ dev_err(&pdev->dev, "could not get IRQ: %d\n", bs->irq);
+ err = bs->irq ? bs->irq : -ENODEV;
+ goto out_master_put;
+ }
+
+ clk_prepare_enable(bs->clk);
+
+ err = devm_request_irq(&pdev->dev, bs->irq, bcm2835_spi_interrupt, 0,
+ dev_name(&pdev->dev), master);
+ if (err) {
+ dev_err(&pdev->dev, "could not request IRQ: %d\n", err);
+ goto out_clk_disable;
+ }
+
+ /* initialise the hardware with the default polarities */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err) {
+ dev_err(&pdev->dev, "could not register SPI master: %d\n", err);
+ goto out_clk_disable;
+ }
+
+ return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(bs->clk);
+out_master_put:
+ spi_master_put(master);
+ return err;
+}
+
+static int bcm2835_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm2835_spi *bs = spi_master_get_devdata(master);
+
+ /* Clear FIFOs, and disable the HW block */
+ bcm2835_wr(bs, BCM2835_SPI_CS,
+ BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static const struct of_device_id bcm2835_spi_match[] = {
+ { .compatible = "brcm,bcm2835-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
+
+static struct platform_driver bcm2835_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = bcm2835_spi_match,
+ },
+ .probe = bcm2835_spi_probe,
+ .remove = bcm2835_spi_remove,
+};
+module_platform_driver(bcm2835_spi_driver);
+
+MODULE_DESCRIPTION("SPI controller driver for Broadcom BCM2835");
+MODULE_AUTHOR("Chris Boot <bootc@bootc.net>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-bcm53xx.c b/drivers/spi/spi-bcm53xx.c
new file mode 100644
index 000000000..152055497
--- /dev/null
+++ b/drivers/spi/spi-bcm53xx.c
@@ -0,0 +1,299 @@
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+#include <linux/bcma/bcma.h>
+#include <linux/spi/spi.h>
+
+#include "spi-bcm53xx.h"
+
+#define BCM53XXSPI_MAX_SPI_BAUD 13500000 /* 216 MHz? */
+
+/* The longest observed required wait was 19 ms */
+#define BCM53XXSPI_SPE_TIMEOUT_MS 80
+
+struct bcm53xxspi {
+ struct bcma_device *core;
+ struct spi_master *master;
+
+ size_t read_offset;
+};
+
+static inline u32 bcm53xxspi_read(struct bcm53xxspi *b53spi, u16 offset)
+{
+ return bcma_read32(b53spi->core, offset);
+}
+
+static inline void bcm53xxspi_write(struct bcm53xxspi *b53spi, u16 offset,
+ u32 value)
+{
+ bcma_write32(b53spi->core, offset, value);
+}
+
+static inline unsigned int bcm53xxspi_calc_timeout(size_t len)
+{
+	/* Do some magic calculation based on length and baud. Add 10% and 1. */
+ return (len * 9000 / BCM53XXSPI_MAX_SPI_BAUD * 110 / 100) + 1;
+}
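+/*
+ * worked example: for a 16 byte transfer, 16 * 9000 / 13500000 floors to 0
+ * in integer math, so the trailing "+ 1" guarantees a minimum timeout of
+ * 1 ms; a 4096 byte transfer yields
+ * (4096 * 9000 / 13500000) * 110 / 100 + 1 = 3 ms
+ */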
+
+static int bcm53xxspi_wait(struct bcm53xxspi *b53spi, unsigned int timeout_ms)
+{
+ unsigned long deadline;
+ u32 tmp;
+
+ /* SPE bit has to be 0 before we read MSPI STATUS */
+ deadline = jiffies + msecs_to_jiffies(BCM53XXSPI_SPE_TIMEOUT_MS);
+ do {
+ tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2);
+ if (!(tmp & B53SPI_MSPI_SPCR2_SPE))
+ break;
+ udelay(5);
+ } while (!time_after_eq(jiffies, deadline));
+
+ if (tmp & B53SPI_MSPI_SPCR2_SPE)
+ goto spi_timeout;
+
+ /* Check status */
+ deadline = jiffies + msecs_to_jiffies(timeout_ms);
+ do {
+ tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_MSPI_STATUS);
+ if (tmp & B53SPI_MSPI_MSPI_STATUS_SPIF) {
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_MSPI_STATUS, 0);
+ return 0;
+ }
+
+ cpu_relax();
+ udelay(100);
+ } while (!time_after_eq(jiffies, deadline));
+
+spi_timeout:
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_MSPI_STATUS, 0);
+
+ pr_err("Timeout waiting for SPI to be ready!\n");
+
+ return -EBUSY;
+}
+
+static void bcm53xxspi_buf_write(struct bcm53xxspi *b53spi, u8 *w_buf,
+ size_t len, bool cont)
+{
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < len; i++) {
+ /* Transmit Register File MSB */
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_TXRAM + 4 * (i * 2),
+ (unsigned int)w_buf[i]);
+ }
+
+ for (i = 0; i < len; i++) {
+ tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
+ B53SPI_CDRAM_PCS_DSCK;
+ if (!cont && i == len - 1)
+ tmp &= ~B53SPI_CDRAM_CONT;
+ tmp &= ~0x1;
+ /* Command Register File */
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_CDRAM + 4 * i, tmp);
+ }
+
+ /* Set queue pointers */
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP, len - 1);
+
+ if (cont)
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
+
+ /* Start SPI transfer */
+ tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2);
+ tmp |= B53SPI_MSPI_SPCR2_SPE;
+ if (cont)
+ tmp |= B53SPI_MSPI_SPCR2_CONT_AFTER_CMD;
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_SPCR2, tmp);
+
+ /* Wait for SPI to finish */
+ bcm53xxspi_wait(b53spi, bcm53xxspi_calc_timeout(len));
+
+ if (!cont)
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
+
+ b53spi->read_offset = len;
+}
+
+static void bcm53xxspi_buf_read(struct bcm53xxspi *b53spi, u8 *r_buf,
+ size_t len, bool cont)
+{
+ u32 tmp;
+ int i;
+
+ for (i = 0; i < b53spi->read_offset + len; i++) {
+ tmp = B53SPI_CDRAM_CONT | B53SPI_CDRAM_PCS_DISABLE_ALL |
+ B53SPI_CDRAM_PCS_DSCK;
+ if (!cont && i == b53spi->read_offset + len - 1)
+ tmp &= ~B53SPI_CDRAM_CONT;
+ tmp &= ~0x1;
+ /* Command Register File */
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_CDRAM + 4 * i, tmp);
+ }
+
+ /* Set queue pointers */
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_NEWQP, 0);
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_ENDQP,
+ b53spi->read_offset + len - 1);
+
+ if (cont)
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 1);
+
+ /* Start SPI transfer */
+ tmp = bcm53xxspi_read(b53spi, B53SPI_MSPI_SPCR2);
+ tmp |= B53SPI_MSPI_SPCR2_SPE;
+ if (cont)
+ tmp |= B53SPI_MSPI_SPCR2_CONT_AFTER_CMD;
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_SPCR2, tmp);
+
+ /* Wait for SPI to finish */
+ bcm53xxspi_wait(b53spi, bcm53xxspi_calc_timeout(len));
+
+ if (!cont)
+ bcm53xxspi_write(b53spi, B53SPI_MSPI_WRITE_LOCK, 0);
+
+ for (i = 0; i < len; ++i) {
+ int offset = b53spi->read_offset + i;
+
+		/* Data stored in the receive register file LSB */
+ r_buf[i] = (u8)bcm53xxspi_read(b53spi, B53SPI_MSPI_RXRAM + 4 * (1 + offset * 2));
+ }
+
+ b53spi->read_offset = 0;
+}
+
+static int bcm53xxspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct bcm53xxspi *b53spi = spi_master_get_devdata(master);
+ u8 *buf;
+ size_t left;
+
+ if (t->tx_buf) {
+ buf = (u8 *)t->tx_buf;
+ left = t->len;
+ while (left) {
+ size_t to_write = min_t(size_t, 16, left);
+ bool cont = left - to_write > 0;
+
+ bcm53xxspi_buf_write(b53spi, buf, to_write, cont);
+ left -= to_write;
+ buf += to_write;
+ }
+ }
+
+ if (t->rx_buf) {
+ buf = (u8 *)t->rx_buf;
+ left = t->len;
+ while (left) {
+ size_t to_read = min_t(size_t, 16 - b53spi->read_offset,
+ left);
+ bool cont = left - to_read > 0;
+
+ bcm53xxspi_buf_read(b53spi, buf, to_read, cont);
+ left -= to_read;
+ buf += to_read;
+ }
+ }
+
+ return 0;
+}
+
+/**************************************************
+ * BCMA
+ **************************************************/
+
+static struct spi_board_info bcm53xx_info = {
+ .modalias = "bcm53xxspiflash",
+};
+
+static const struct bcma_device_id bcm53xxspi_bcma_tbl[] = {
+ BCMA_CORE(BCMA_MANUF_BCM, BCMA_CORE_NS_QSPI, BCMA_ANY_REV, BCMA_ANY_CLASS),
+ {},
+};
+MODULE_DEVICE_TABLE(bcma, bcm53xxspi_bcma_tbl);
+
+static int bcm53xxspi_bcma_probe(struct bcma_device *core)
+{
+ struct bcm53xxspi *b53spi;
+ struct spi_master *master;
+ int err;
+
+ if (core->bus->drv_cc.core->id.rev != 42) {
+ pr_err("SPI on SoC with unsupported ChipCommon rev\n");
+ return -ENOTSUPP;
+ }
+
+ master = spi_alloc_master(&core->dev, sizeof(*b53spi));
+ if (!master)
+ return -ENOMEM;
+
+ b53spi = spi_master_get_devdata(master);
+ b53spi->master = master;
+ b53spi->core = core;
+
+ master->transfer_one = bcm53xxspi_transfer_one;
+
+ bcma_set_drvdata(core, b53spi);
+
+ err = devm_spi_register_master(&core->dev, master);
+ if (err) {
+ spi_master_put(master);
+ bcma_set_drvdata(core, NULL);
+ goto out;
+ }
+
+ /* Broadcom SoCs (at least with the CC rev 42) use SPI for flash only */
+ spi_new_device(master, &bcm53xx_info);
+
+out:
+ return err;
+}
+
+static void bcm53xxspi_bcma_remove(struct bcma_device *core)
+{
+ struct bcm53xxspi *b53spi = bcma_get_drvdata(core);
+
+ spi_unregister_master(b53spi->master);
+}
+
+static struct bcma_driver bcm53xxspi_bcma_driver = {
+ .name = KBUILD_MODNAME,
+ .id_table = bcm53xxspi_bcma_tbl,
+ .probe = bcm53xxspi_bcma_probe,
+ .remove = bcm53xxspi_bcma_remove,
+};
+
+/**************************************************
+ * Init & exit
+ **************************************************/
+
+static int __init bcm53xxspi_module_init(void)
+{
+ int err = 0;
+
+ err = bcma_driver_register(&bcm53xxspi_bcma_driver);
+ if (err)
+ pr_err("Failed to register bcma driver: %d\n", err);
+
+ return err;
+}
+
+static void __exit bcm53xxspi_module_exit(void)
+{
+ bcma_driver_unregister(&bcm53xxspi_bcma_driver);
+}
+
+module_init(bcm53xxspi_module_init);
+module_exit(bcm53xxspi_module_exit);
+
+MODULE_DESCRIPTION("Broadcom BCM53xx SPI Controller driver");
+MODULE_AUTHOR("Rafał Miłecki <zajec5@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm53xx.h b/drivers/spi/spi-bcm53xx.h
new file mode 100644
index 000000000..73575dfe6
--- /dev/null
+++ b/drivers/spi/spi-bcm53xx.h
@@ -0,0 +1,72 @@
+#ifndef SPI_BCM53XX_H
+#define SPI_BCM53XX_H
+
+#define B53SPI_BSPI_REVISION_ID 0x000
+#define B53SPI_BSPI_SCRATCH 0x004
+#define B53SPI_BSPI_MAST_N_BOOT_CTRL 0x008
+#define B53SPI_BSPI_BUSY_STATUS 0x00c
+#define B53SPI_BSPI_INTR_STATUS 0x010
+#define B53SPI_BSPI_B0_STATUS 0x014
+#define B53SPI_BSPI_B0_CTRL 0x018
+#define B53SPI_BSPI_B1_STATUS 0x01c
+#define B53SPI_BSPI_B1_CTRL 0x020
+#define B53SPI_BSPI_STRAP_OVERRIDE_CTRL 0x024
+#define B53SPI_BSPI_FLEX_MODE_ENABLE 0x028
+#define B53SPI_BSPI_BITS_PER_CYCLE 0x02c
+#define B53SPI_BSPI_BITS_PER_PHASE 0x030
+#define B53SPI_BSPI_CMD_AND_MODE_BYTE 0x034
+#define B53SPI_BSPI_BSPI_FLASH_UPPER_ADDR_BYTE 0x038
+#define B53SPI_BSPI_BSPI_XOR_VALUE 0x03c
+#define B53SPI_BSPI_BSPI_XOR_ENABLE 0x040
+#define B53SPI_BSPI_BSPI_PIO_MODE_ENABLE 0x044
+#define B53SPI_BSPI_BSPI_PIO_IODIR 0x048
+#define B53SPI_BSPI_BSPI_PIO_DATA 0x04c
+
+/* RAF */
+#define B53SPI_RAF_START_ADDR 0x100
+#define B53SPI_RAF_NUM_WORDS 0x104
+#define B53SPI_RAF_CTRL 0x108
+#define B53SPI_RAF_FULLNESS 0x10c
+#define B53SPI_RAF_WATERMARK 0x110
+#define B53SPI_RAF_STATUS 0x114
+#define B53SPI_RAF_READ_DATA 0x118
+#define B53SPI_RAF_WORD_CNT 0x11c
+#define B53SPI_RAF_CURR_ADDR 0x120
+
+/* MSPI */
+#define B53SPI_MSPI_SPCR0_LSB 0x200
+#define B53SPI_MSPI_SPCR0_MSB 0x204
+#define B53SPI_MSPI_SPCR1_LSB 0x208
+#define B53SPI_MSPI_SPCR1_MSB 0x20c
+#define B53SPI_MSPI_NEWQP 0x210
+#define B53SPI_MSPI_ENDQP 0x214
+#define B53SPI_MSPI_SPCR2 0x218
+#define B53SPI_MSPI_SPCR2_SPE 0x00000040
+#define B53SPI_MSPI_SPCR2_CONT_AFTER_CMD 0x00000080
+#define B53SPI_MSPI_MSPI_STATUS 0x220
+#define B53SPI_MSPI_MSPI_STATUS_SPIF 0x00000001
+#define B53SPI_MSPI_CPTQP 0x224
+#define B53SPI_MSPI_TXRAM 0x240 /* 32 registers, up to 0x2b8 */
+#define B53SPI_MSPI_RXRAM 0x2c0 /* 32 registers, up to 0x33c */
+#define B53SPI_MSPI_CDRAM 0x340 /* 16 registers, up to 0x37c */
+#define B53SPI_CDRAM_PCS_PCS0 0x00000001
+#define B53SPI_CDRAM_PCS_PCS1 0x00000002
+#define B53SPI_CDRAM_PCS_PCS2 0x00000004
+#define B53SPI_CDRAM_PCS_PCS3 0x00000008
+#define B53SPI_CDRAM_PCS_DISABLE_ALL 0x0000000f
+#define B53SPI_CDRAM_PCS_DSCK 0x00000010
+#define B53SPI_CDRAM_BITSE 0x00000040
+#define B53SPI_CDRAM_CONT 0x00000080
+#define B53SPI_MSPI_WRITE_LOCK 0x380
+#define B53SPI_MSPI_DISABLE_FLUSH_GEN 0x384
+
+/* Interrupt */
+#define B53SPI_INTR_RAF_LR_FULLNESS_REACHED 0x3a0
+#define B53SPI_INTR_RAF_LR_TRUNCATED 0x3a4
+#define B53SPI_INTR_RAF_LR_IMPATIENT 0x3a8
+#define B53SPI_INTR_RAF_LR_SESSION_DONE 0x3ac
+#define B53SPI_INTR_RAF_LR_OVERREAD 0x3b0
+#define B53SPI_INTR_MSPI_DONE 0x3b4
+#define B53SPI_INTR_MSPI_HALT_SET_TRANSACTION_DONE 0x3b8
+
+#endif /* SPI_BCM53XX_H */
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
new file mode 100644
index 000000000..f5ca6dc3a
--- /dev/null
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -0,0 +1,473 @@
+/*
+ * Broadcom BCM63XX High Speed SPI Controller driver
+ *
+ * Copyright 2000-2010 Broadcom Corporation
+ * Copyright 2012-2013 Jonas Gorski <jogo@openwrt.org>
+ *
+ * Licensed under the GNU/GPL. See COPYING for details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/mutex.h>
+
+#define HSSPI_GLOBAL_CTRL_REG 0x0
+#define GLOBAL_CTRL_CS_POLARITY_SHIFT 0
+#define GLOBAL_CTRL_CS_POLARITY_MASK 0x000000ff
+#define GLOBAL_CTRL_PLL_CLK_CTRL_SHIFT 8
+#define GLOBAL_CTRL_PLL_CLK_CTRL_MASK 0x0000ff00
+#define GLOBAL_CTRL_CLK_GATE_SSOFF BIT(16)
+#define GLOBAL_CTRL_CLK_POLARITY BIT(17)
+#define GLOBAL_CTRL_MOSI_IDLE BIT(18)
+
+#define HSSPI_GLOBAL_EXT_TRIGGER_REG 0x4
+
+#define HSSPI_INT_STATUS_REG 0x8
+#define HSSPI_INT_STATUS_MASKED_REG 0xc
+#define HSSPI_INT_MASK_REG 0x10
+
+#define HSSPI_PINGx_CMD_DONE(i) BIT((i * 8) + 0)
+#define HSSPI_PINGx_RX_OVER(i) BIT((i * 8) + 1)
+#define HSSPI_PINGx_TX_UNDER(i) BIT((i * 8) + 2)
+#define HSSPI_PINGx_POLL_TIMEOUT(i) BIT((i * 8) + 3)
+#define HSSPI_PINGx_CTRL_INVAL(i) BIT((i * 8) + 4)
+
+#define HSSPI_INT_CLEAR_ALL 0xff001f1f
+
+#define HSSPI_PINGPONG_COMMAND_REG(x) (0x80 + (x) * 0x40)
+#define PINGPONG_CMD_COMMAND_MASK 0xf
+#define PINGPONG_COMMAND_NOOP 0
+#define PINGPONG_COMMAND_START_NOW 1
+#define PINGPONG_COMMAND_START_TRIGGER 2
+#define PINGPONG_COMMAND_HALT 3
+#define PINGPONG_COMMAND_FLUSH 4
+#define PINGPONG_CMD_PROFILE_SHIFT 8
+#define PINGPONG_CMD_SS_SHIFT 12
+
+#define HSSPI_PINGPONG_STATUS_REG(x) (0x84 + (x) * 0x40)
+
+#define HSSPI_PROFILE_CLK_CTRL_REG(x) (0x100 + (x) * 0x20)
+#define CLK_CTRL_FREQ_CTRL_MASK 0x0000ffff
+#define CLK_CTRL_SPI_CLK_2X_SEL BIT(14)
+#define CLK_CTRL_ACCUM_RST_ON_LOOP BIT(15)
+
+#define HSSPI_PROFILE_SIGNAL_CTRL_REG(x) (0x104 + (x) * 0x20)
+#define SIGNAL_CTRL_LATCH_RISING BIT(12)
+#define SIGNAL_CTRL_LAUNCH_RISING BIT(13)
+#define SIGNAL_CTRL_ASYNC_INPUT_PATH BIT(16)
+
+#define HSSPI_PROFILE_MODE_CTRL_REG(x) (0x108 + (x) * 0x20)
+#define MODE_CTRL_MULTIDATA_RD_STRT_SHIFT 8
+#define MODE_CTRL_MULTIDATA_WR_STRT_SHIFT 12
+#define MODE_CTRL_MULTIDATA_RD_SIZE_SHIFT 16
+#define MODE_CTRL_MULTIDATA_WR_SIZE_SHIFT 18
+#define MODE_CTRL_MODE_3WIRE BIT(20)
+#define MODE_CTRL_PREPENDBYTE_CNT_SHIFT 24
+
+#define HSSPI_FIFO_REG(x) (0x200 + (x) * 0x200)
+
+
+#define HSSPI_OP_CODE_SHIFT 13
+#define HSSPI_OP_SLEEP (0 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ_WRITE (1 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_WRITE (2 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_READ (3 << HSSPI_OP_CODE_SHIFT)
+#define HSSPI_OP_SETIRQ (4 << HSSPI_OP_CODE_SHIFT)
+
+#define HSSPI_BUFFER_LEN 512
+#define HSSPI_OPCODE_LEN 2
+
+#define HSSPI_MAX_PREPEND_LEN 15
+
+#define HSSPI_MAX_SYNC_CLOCK 30000000
+
+#define HSSPI_BUS_NUM 1 /* 0 is legacy SPI */
+
+struct bcm63xx_hsspi {
+ struct completion done;
+ struct mutex bus_mutex;
+
+ struct platform_device *pdev;
+ struct clk *clk;
+ void __iomem *regs;
+ u8 __iomem *fifo;
+
+ u32 speed_hz;
+ u8 cs_polarity;
+};
+
+static void bcm63xx_hsspi_set_cs(struct bcm63xx_hsspi *bs, unsigned cs,
+ bool active)
+{
+ u32 reg;
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ reg &= ~BIT(cs);
+ if (active == !(bs->cs_polarity & BIT(cs)))
+ reg |= BIT(cs);
+
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+}
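+/*
+ * the bit written in bcm63xx_hsspi_set_cs() is "active XOR active-high":
+ * with the polarity bit clear (active-low CS) an active request sets the
+ * bit, while with the polarity bit set (SPI_CS_HIGH) an inactive one does
+ */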
+
+static void bcm63xx_hsspi_set_clk(struct bcm63xx_hsspi *bs,
+ struct spi_device *spi, int hz)
+{
+ unsigned profile = spi->chip_select;
+ u32 reg;
+
+ reg = DIV_ROUND_UP(2048, DIV_ROUND_UP(bs->speed_hz, hz));
+ __raw_writel(CLK_CTRL_ACCUM_RST_ON_LOOP | reg,
+ bs->regs + HSSPI_PROFILE_CLK_CTRL_REG(profile));
+
+ reg = __raw_readl(bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+ if (hz > HSSPI_MAX_SYNC_CLOCK)
+ reg |= SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ else
+ reg &= ~SIGNAL_CTRL_ASYNC_INPUT_PATH;
+ __raw_writel(reg, bs->regs + HSSPI_PROFILE_SIGNAL_CTRL_REG(profile));
+
+ mutex_lock(&bs->bus_mutex);
+ /* setup clock polarity */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CLK_POLARITY;
+ if (spi->mode & SPI_CPOL)
+ reg |= GLOBAL_CTRL_CLK_POLARITY;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+}
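+/*
+ * illustrative numbers (PLL rate hypothetical): with bs->speed_hz = 400 MHz
+ * and hz = 20 MHz the profile gets
+ * DIV_ROUND_UP(2048, DIV_ROUND_UP(400000000, 20000000)) =
+ * DIV_ROUND_UP(2048, 20) = 103 as its frequency control word, and the
+ * asynchronous input path stays disabled since 20 MHz does not exceed
+ * HSSPI_MAX_SYNC_CLOCK
+ */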
+
+static int bcm63xx_hsspi_do_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ unsigned chip_select = spi->chip_select;
+ u16 opcode = 0;
+ int pending = t->len;
+ int step_size = HSSPI_BUFFER_LEN;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+
+ bcm63xx_hsspi_set_clk(bs, spi, t->speed_hz);
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, true);
+
+ if (tx && rx)
+ opcode = HSSPI_OP_READ_WRITE;
+ else if (tx)
+ opcode = HSSPI_OP_WRITE;
+ else if (rx)
+ opcode = HSSPI_OP_READ;
+
+ if (opcode != HSSPI_OP_READ)
+ step_size -= HSSPI_OPCODE_LEN;
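+	/*
+	 * i.e. reads use the full 512 byte fifo, while writes reserve 2
+	 * bytes for the opcode and are chunked into 510 byte steps; a
+	 * hypothetical 1200 byte write goes out as 510 + 510 + 180
+	 */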
+
+ __raw_writel(0 << MODE_CTRL_PREPENDBYTE_CNT_SHIFT |
+ 2 << MODE_CTRL_MULTIDATA_WR_STRT_SHIFT |
+ 2 << MODE_CTRL_MULTIDATA_RD_STRT_SHIFT | 0xff,
+ bs->regs + HSSPI_PROFILE_MODE_CTRL_REG(chip_select));
+
+ while (pending > 0) {
+ int curr_step = min_t(int, step_size, pending);
+
+ reinit_completion(&bs->done);
+ if (tx) {
+ memcpy_toio(bs->fifo + HSSPI_OPCODE_LEN, tx, curr_step);
+ tx += curr_step;
+ }
+
+ __raw_writew(opcode | curr_step, bs->fifo);
+
+ /* enable interrupt */
+ __raw_writel(HSSPI_PINGx_CMD_DONE(0),
+ bs->regs + HSSPI_INT_MASK_REG);
+
+ /* start the transfer */
+ __raw_writel(!chip_select << PINGPONG_CMD_SS_SHIFT |
+ chip_select << PINGPONG_CMD_PROFILE_SHIFT |
+ PINGPONG_COMMAND_START_NOW,
+ bs->regs + HSSPI_PINGPONG_COMMAND_REG(0));
+
+ if (wait_for_completion_timeout(&bs->done, HZ) == 0) {
+ dev_err(&bs->pdev->dev, "transfer timed out!\n");
+ return -ETIMEDOUT;
+ }
+
+ if (rx) {
+ memcpy_fromio(rx, bs->fifo, curr_step);
+ rx += curr_step;
+ }
+
+ pending -= curr_step;
+ }
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_setup(struct spi_device *spi)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = __raw_readl(bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+ reg &= ~(SIGNAL_CTRL_LAUNCH_RISING | SIGNAL_CTRL_LATCH_RISING);
+ if (spi->mode & SPI_CPHA)
+ reg |= SIGNAL_CTRL_LAUNCH_RISING;
+ else
+ reg |= SIGNAL_CTRL_LATCH_RISING;
+ __raw_writel(reg, bs->regs +
+ HSSPI_PROFILE_SIGNAL_CTRL_REG(spi->chip_select));
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ /* only change actual polarities if there is no transfer */
+ if ((reg & GLOBAL_CTRL_CS_POLARITY_MASK) == bs->cs_polarity) {
+ if (spi->mode & SPI_CS_HIGH)
+ reg |= BIT(spi->chip_select);
+ else
+ reg &= ~BIT(spi->chip_select);
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ }
+
+ if (spi->mode & SPI_CS_HIGH)
+ bs->cs_polarity |= BIT(spi->chip_select);
+ else
+ bs->cs_polarity &= ~BIT(spi->chip_select);
+
+ mutex_unlock(&bs->bus_mutex);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_transfer_one(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ struct spi_device *spi = msg->spi;
+ int status = -EINVAL;
+ int dummy_cs;
+ u32 reg;
+
+ /* This controller does not support keeping CS active during idle.
+ * To work around this, we use the following ugly hack:
+ *
+ * a. Invert the target chip select's polarity so it will be active.
+ * b. Select a "dummy" chip select to use as the hardware target.
+ * c. Invert the dummy chip select's polarity so it will be inactive
+ * during the actual transfers.
+ * d. Tell the hardware to send to the dummy chip select. Thanks to
+ * the multiplexed nature of SPI the actual target will receive
+ * the transfer and we see its response.
+ *
+ * e. At the end restore the polarities again to their default values.
+ */
+
+ dummy_cs = !spi->chip_select;
+ bcm63xx_hsspi_set_cs(bs, dummy_cs, true);
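+	/* e.g. a device on chip select 0 transfers via dummy CS 1,
+	 * any other device via dummy CS 0
+	 */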
+
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ status = bcm63xx_hsspi_do_txrx(spi, t);
+ if (status)
+ break;
+
+ msg->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (t->cs_change)
+ bcm63xx_hsspi_set_cs(bs, spi->chip_select, false);
+ }
+
+ mutex_lock(&bs->bus_mutex);
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ reg &= ~GLOBAL_CTRL_CS_POLARITY_MASK;
+ reg |= bs->cs_polarity;
+ __raw_writel(reg, bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ mutex_unlock(&bs->bus_mutex);
+
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static irqreturn_t bcm63xx_hsspi_interrupt(int irq, void *dev_id)
+{
+ struct bcm63xx_hsspi *bs = (struct bcm63xx_hsspi *)dev_id;
+
+ if (__raw_readl(bs->regs + HSSPI_INT_STATUS_MASKED_REG) == 0)
+ return IRQ_NONE;
+
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ complete(&bs->done);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm63xx_hsspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct bcm63xx_hsspi *bs;
+ struct resource *res_mem;
+ void __iomem *regs;
+ struct device *dev = &pdev->dev;
+ struct clk *clk;
+ int irq, ret;
+ u32 reg, rate;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq\n");
+ return -ENXIO;
+ }
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ regs = devm_ioremap_resource(dev, res_mem);
+ if (IS_ERR(regs))
+ return PTR_ERR(regs);
+
+ clk = devm_clk_get(dev, "hsspi");
+
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ rate = clk_get_rate(clk);
+ if (!rate)
+ return -EINVAL;
+
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ return ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*bs));
+ if (!master) {
+ ret = -ENOMEM;
+ goto out_disable_clk;
+ }
+
+ bs = spi_master_get_devdata(master);
+ bs->pdev = pdev;
+ bs->clk = clk;
+ bs->regs = regs;
+ bs->speed_hz = rate;
+ bs->fifo = (u8 __iomem *)(bs->regs + HSSPI_FIFO_REG(0));
+
+ mutex_init(&bs->bus_mutex);
+ init_completion(&bs->done);
+
+ master->bus_num = HSSPI_BUS_NUM;
+ master->num_chipselect = 8;
+ master->setup = bcm63xx_hsspi_setup;
+ master->transfer_one_message = bcm63xx_hsspi_transfer_one;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ /* Initialize the hardware */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+
+ /* clean up any pending interrupts */
+ __raw_writel(HSSPI_INT_CLEAR_ALL, bs->regs + HSSPI_INT_STATUS_REG);
+
+ /* read out default CS polarities */
+ reg = __raw_readl(bs->regs + HSSPI_GLOBAL_CTRL_REG);
+ bs->cs_polarity = reg & GLOBAL_CTRL_CS_POLARITY_MASK;
+ __raw_writel(reg | GLOBAL_CTRL_CLK_GATE_SSOFF,
+ bs->regs + HSSPI_GLOBAL_CTRL_REG);
+
+ ret = devm_request_irq(dev, irq, bcm63xx_hsspi_interrupt, IRQF_SHARED,
+ pdev->name, bs);
+
+ if (ret)
+ goto out_put_master;
+
+ /* register and we are done */
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto out_put_master;
+
+ return 0;
+
+out_put_master:
+ spi_master_put(master);
+out_disable_clk:
+ clk_disable_unprepare(clk);
+ return ret;
+}
+
+static int bcm63xx_hsspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+ /* reset the hardware and block queue progress */
+ __raw_writel(0, bs->regs + HSSPI_INT_MASK_REG);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcm63xx_hsspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static int bcm63xx_hsspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_hsspi *bs = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
+
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bcm63xx_hsspi_pm_ops, bcm63xx_hsspi_suspend,
+ bcm63xx_hsspi_resume);
+
+static struct platform_driver bcm63xx_hsspi_driver = {
+ .driver = {
+ .name = "bcm63xx-hsspi",
+ .pm = &bcm63xx_hsspi_pm_ops,
+ },
+ .probe = bcm63xx_hsspi_probe,
+ .remove = bcm63xx_hsspi_remove,
+};
+
+module_platform_driver(bcm63xx_hsspi_driver);
+
+MODULE_ALIAS("platform:bcm63xx_hsspi");
+MODULE_DESCRIPTION("Broadcom BCM63xx High Speed SPI Controller driver");
+MODULE_AUTHOR("Jonas Gorski <jogo@openwrt.org>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bcm63xx.c b/drivers/spi/spi-bcm63xx.c
new file mode 100644
index 000000000..e73e2b052
--- /dev/null
+++ b/drivers/spi/spi-bcm63xx.c
@@ -0,0 +1,480 @@
+/*
+ * Broadcom BCM63xx SPI controller support
+ *
+ * Copyright (C) 2009-2012 Florian Fainelli <florian@openwrt.org>
+ * Copyright (C) 2010 Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/completion.h>
+#include <linux/err.h>
+#include <linux/pm_runtime.h>
+
+#include <bcm63xx_dev_spi.h>
+
+#define BCM63XX_SPI_MAX_PREPEND 15
+
+struct bcm63xx_spi {
+ struct completion done;
+
+ void __iomem *regs;
+ int irq;
+
+ /* Platform data */
+ unsigned fifo_size;
+ unsigned int msg_type_shift;
+ unsigned int msg_ctl_width;
+
+ /* data iomem */
+ u8 __iomem *tx_io;
+ const u8 __iomem *rx_io;
+
+ struct clk *clk;
+ struct platform_device *pdev;
+};
+
+static inline u8 bcm_spi_readb(struct bcm63xx_spi *bs,
+ unsigned int offset)
+{
+ return bcm_readb(bs->regs + bcm63xx_spireg(offset));
+}
+
+static inline u16 bcm_spi_readw(struct bcm63xx_spi *bs,
+ unsigned int offset)
+{
+ return bcm_readw(bs->regs + bcm63xx_spireg(offset));
+}
+
+static inline void bcm_spi_writeb(struct bcm63xx_spi *bs,
+ u8 value, unsigned int offset)
+{
+ bcm_writeb(value, bs->regs + bcm63xx_spireg(offset));
+}
+
+static inline void bcm_spi_writew(struct bcm63xx_spi *bs,
+ u16 value, unsigned int offset)
+{
+ bcm_writew(value, bs->regs + bcm63xx_spireg(offset));
+}
+
+static const unsigned bcm63xx_spi_freq_table[SPI_CLK_MASK][2] = {
+ { 20000000, SPI_CLK_20MHZ },
+ { 12500000, SPI_CLK_12_50MHZ },
+ { 6250000, SPI_CLK_6_250MHZ },
+ { 3125000, SPI_CLK_3_125MHZ },
+ { 1563000, SPI_CLK_1_563MHZ },
+ { 781000, SPI_CLK_0_781MHZ },
+ { 391000, SPI_CLK_0_391MHZ }
+};
+
+static void bcm63xx_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+ u8 clk_cfg, reg;
+ int i;
+
+ /* Find the closest clock configuration */
+ for (i = 0; i < SPI_CLK_MASK; i++) {
+ if (t->speed_hz >= bcm63xx_spi_freq_table[i][0]) {
+ clk_cfg = bcm63xx_spi_freq_table[i][1];
+ break;
+ }
+ }
+
+ /* No matching configuration found, default to lowest */
+ if (i == SPI_CLK_MASK)
+ clk_cfg = SPI_CLK_0_391MHZ;
+
+ /* clear existing clock configuration bits of the register */
+ reg = bcm_spi_readb(bs, SPI_CLK_CFG);
+ reg &= ~SPI_CLK_MASK;
+ reg |= clk_cfg;
+
+ bcm_spi_writeb(bs, reg, SPI_CLK_CFG);
+ dev_dbg(&spi->dev, "Setting clock register to %02x (hz %d)\n",
+ clk_cfg, t->speed_hz);
+}
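+
+/*
+ * Selection example: a 10 MHz request matches neither 20 MHz nor 12.5 MHz
+ * in the descending table above, so the first entry it is >= -- 6.25 MHz --
+ * is used; requests below 391 kHz fall through the loop and default to
+ * SPI_CLK_0_391MHZ.
+ */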
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA)
+
+static int bcm63xx_txrx_bufs(struct spi_device *spi, struct spi_transfer *first,
+ unsigned int num_transfers)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(spi->master);
+ u16 msg_ctl;
+ u16 cmd;
+ u8 rx_tail;
+ unsigned int i, timeout = 0, prepend_len = 0, len = 0;
+ struct spi_transfer *t = first;
+ bool do_rx = false;
+ bool do_tx = false;
+
+ /* Disable the CMD_DONE interrupt */
+ bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+
+ dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
+ t->tx_buf, t->rx_buf, t->len);
+
+ if (num_transfers > 1 && t->tx_buf && t->len <= BCM63XX_SPI_MAX_PREPEND)
+ prepend_len = t->len;
+
+ /* prepare the buffer */
+ for (i = 0; i < num_transfers; i++) {
+ if (t->tx_buf) {
+ do_tx = true;
+ memcpy_toio(bs->tx_io + len, t->tx_buf, t->len);
+
+ /* don't prepend more than one tx */
+ if (t != first)
+ prepend_len = 0;
+ }
+
+ if (t->rx_buf) {
+ do_rx = true;
+ /* prepend is half-duplex write only */
+ if (t == first)
+ prepend_len = 0;
+ }
+
+ len += t->len;
+
+ t = list_entry(t->transfer_list.next, struct spi_transfer,
+ transfer_list);
+ }
+
+ reinit_completion(&bs->done);
+
+ /* Fill in the Message control register */
+ msg_ctl = (len << SPI_BYTE_CNT_SHIFT);
+
+ if (do_rx && do_tx && prepend_len == 0)
+ msg_ctl |= (SPI_FD_RW << bs->msg_type_shift);
+ else if (do_rx)
+ msg_ctl |= (SPI_HD_R << bs->msg_type_shift);
+ else if (do_tx)
+ msg_ctl |= (SPI_HD_W << bs->msg_type_shift);
+
+ switch (bs->msg_ctl_width) {
+ case 8:
+ bcm_spi_writeb(bs, msg_ctl, SPI_MSG_CTL);
+ break;
+ case 16:
+ bcm_spi_writew(bs, msg_ctl, SPI_MSG_CTL);
+ break;
+ }
+
+ /* Issue the transfer */
+ cmd = SPI_CMD_START_IMMEDIATE;
+ cmd |= (prepend_len << SPI_CMD_PREPEND_BYTE_CNT_SHIFT);
+ cmd |= (spi->chip_select << SPI_CMD_DEVICE_ID_SHIFT);
+ bcm_spi_writew(bs, cmd, SPI_CMD);
+
+ /* Enable the CMD_DONE interrupt */
+ bcm_spi_writeb(bs, SPI_INTR_CMD_DONE, SPI_INT_MASK);
+
+ timeout = wait_for_completion_timeout(&bs->done, HZ);
+ if (!timeout)
+ return -ETIMEDOUT;
+
+ if (!do_rx)
+ return 0;
+
+ len = 0;
+ t = first;
+ /* Read out all the data */
+ for (i = 0; i < num_transfers; i++) {
+ if (t->rx_buf)
+ memcpy_fromio(t->rx_buf, bs->rx_io + len, t->len);
+
+ if (t != first || prepend_len == 0)
+ len += t->len;
+
+ t = list_entry(t->transfer_list.next, struct spi_transfer,
+ transfer_list);
+ }
+
+ return 0;
+}
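+
+/*
+ * Prepend example: a 3-byte command (tx only) followed by a 32-byte read
+ * becomes a single half-duplex read with prepend_len = 3. The prepended
+ * command bytes produce no RX data, which is why the readout loop above
+ * does not advance the FIFO offset for a prepended first transfer.
+ */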
+
+static int bcm63xx_spi_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ struct spi_transfer *t, *first = NULL;
+ struct spi_device *spi = m->spi;
+ int status = 0;
+ unsigned int n_transfers = 0, total_len = 0;
+ bool can_use_prepend = false;
+
+	/*
+	 * This SPI controller does not support keeping CS active after a
+	 * transfer.
+	 * Work around this by merging as many transfers as we can into one
+	 * big full-duplex transfer.
+	 */
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (!first)
+ first = t;
+
+ n_transfers++;
+ total_len += t->len;
+
+ if (n_transfers == 2 && !first->rx_buf && !t->tx_buf &&
+ first->len <= BCM63XX_SPI_MAX_PREPEND)
+ can_use_prepend = true;
+ else if (can_use_prepend && t->tx_buf)
+ can_use_prepend = false;
+
+		/* we can only transfer one FIFO's worth of data */
+ if ((can_use_prepend &&
+ total_len > (bs->fifo_size + BCM63XX_SPI_MAX_PREPEND)) ||
+ (!can_use_prepend && total_len > bs->fifo_size)) {
+ dev_err(&spi->dev, "unable to do transfers larger than FIFO size (%i > %i)\n",
+ total_len, bs->fifo_size);
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* all combined transfers have to have the same speed */
+ if (t->speed_hz != first->speed_hz) {
+ dev_err(&spi->dev, "unable to change speed between transfers\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ /* CS will be deasserted directly after transfer */
+ if (t->delay_usecs) {
+ dev_err(&spi->dev, "unable to keep CS asserted after transfer\n");
+ status = -EINVAL;
+ goto exit;
+ }
+
+ if (t->cs_change ||
+ list_is_last(&t->transfer_list, &m->transfers)) {
+ /* configure adapter for a new transfer */
+ bcm63xx_spi_setup_transfer(spi, first);
+
+ /* send the data */
+ status = bcm63xx_txrx_bufs(spi, first, n_transfers);
+ if (status)
+ goto exit;
+
+ m->actual_length += total_len;
+
+ first = NULL;
+ n_transfers = 0;
+ total_len = 0;
+ can_use_prepend = false;
+ }
+ }
+exit:
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
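+
+/*
+ * For instance, a 2-byte command write followed by a 4-byte read (no
+ * cs_change in between) is handed to bcm63xx_txrx_bufs() as one 6-byte
+ * operation: a half-duplex read with the command bytes prepended, or a
+ * full-duplex transfer when the write does not qualify for prepending.
+ */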
+
+/* This driver supports single master mode only; hence
+ * CMD_DONE is the only interrupt we care about.
+ */
+static irqreturn_t bcm63xx_spi_interrupt(int irq, void *dev_id)
+{
+ struct spi_master *master = (struct spi_master *)dev_id;
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ u8 intr;
+
+	/* Read interrupts and clear them immediately */
+ intr = bcm_spi_readb(bs, SPI_INT_STATUS);
+ bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
+ bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+
+ /* A transfer completed */
+ if (intr & SPI_INTR_CMD_DONE)
+ complete(&bs->done);
+
+ return IRQ_HANDLED;
+}
+
+static int bcm63xx_spi_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ struct device *dev = &pdev->dev;
+ struct bcm63xx_spi_pdata *pdata = dev_get_platdata(&pdev->dev);
+ int irq;
+ struct spi_master *master;
+ struct clk *clk;
+ struct bcm63xx_spi *bs;
+ int ret;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(dev, "no irq\n");
+ return -ENXIO;
+ }
+
+ clk = devm_clk_get(dev, "spi");
+ if (IS_ERR(clk)) {
+ dev_err(dev, "no clock for device\n");
+ return PTR_ERR(clk);
+ }
+
+ master = spi_alloc_master(dev, sizeof(*bs));
+ if (!master) {
+ dev_err(dev, "out of memory\n");
+ return -ENOMEM;
+ }
+
+ bs = spi_master_get_devdata(master);
+ init_completion(&bs->done);
+
+ platform_set_drvdata(pdev, master);
+ bs->pdev = pdev;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ bs->regs = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(bs->regs)) {
+ ret = PTR_ERR(bs->regs);
+ goto out_err;
+ }
+
+ bs->irq = irq;
+ bs->clk = clk;
+ bs->fifo_size = pdata->fifo_size;
+
+ ret = devm_request_irq(&pdev->dev, irq, bcm63xx_spi_interrupt, 0,
+ pdev->name, master);
+ if (ret) {
+ dev_err(dev, "unable to request irq\n");
+ goto out_err;
+ }
+
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->num_chipselect;
+ master->transfer_one_message = bcm63xx_spi_transfer_one;
+ master->mode_bits = MODEBITS;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
+ bs->msg_type_shift = pdata->msg_type_shift;
+ bs->msg_ctl_width = pdata->msg_ctl_width;
+	bs->tx_io = (u8 __iomem *)(bs->regs + bcm63xx_spireg(SPI_MSG_DATA));
+	bs->rx_io = (const u8 __iomem *)(bs->regs + bcm63xx_spireg(SPI_RX_DATA));
+
+ switch (bs->msg_ctl_width) {
+ case 8:
+ case 16:
+ break;
+ default:
+ dev_err(dev, "unsupported MSG_CTL width: %d\n",
+ bs->msg_ctl_width);
+ goto out_err;
+ }
+
+ /* Initialize hardware */
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ goto out_err;
+
+ bcm_spi_writeb(bs, SPI_INTR_CLEAR_ALL, SPI_INT_STATUS);
+
+ /* register and we are done */
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(dev, "spi register failed\n");
+ goto out_clk_disable;
+ }
+
+	dev_info(dev, "at 0x%08x (irq %d, FIFO size %d)\n",
+ r->start, irq, bs->fifo_size);
+
+ return 0;
+
+out_clk_disable:
+ clk_disable_unprepare(clk);
+out_err:
+ spi_master_put(master);
+ return ret;
+}
+
+static int bcm63xx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+
+ /* reset spi block */
+ bcm_spi_writeb(bs, 0, SPI_INT_MASK);
+
+ /* HW shutdown */
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bcm63xx_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+
+ clk_disable_unprepare(bs->clk);
+
+ return 0;
+}
+
+static int bcm63xx_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct bcm63xx_spi *bs = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(bs->clk);
+ if (ret)
+ return ret;
+
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops bcm63xx_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(bcm63xx_spi_suspend, bcm63xx_spi_resume)
+};
+
+static struct platform_driver bcm63xx_spi_driver = {
+ .driver = {
+ .name = "bcm63xx-spi",
+ .pm = &bcm63xx_spi_pm_ops,
+ },
+ .probe = bcm63xx_spi_probe,
+ .remove = bcm63xx_spi_remove,
+};
+
+module_platform_driver(bcm63xx_spi_driver);
+
+MODULE_ALIAS("platform:bcm63xx_spi");
+MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
+MODULE_AUTHOR("Tanguy Bouzeloc <tanguy.bouzeloc@efixo.com>");
+MODULE_DESCRIPTION("Broadcom BCM63xx SPI Controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-bfin-sport.c b/drivers/spi/spi-bfin-sport.c
new file mode 100644
index 000000000..a78693189
--- /dev/null
+++ b/drivers/spi/spi-bfin-sport.c
@@ -0,0 +1,929 @@
+/*
+ * SPI bus via the Blackfin SPORT peripheral
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+
+#include <asm/portmux.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/blackfin.h>
+#include <asm/bfin_sport.h>
+#include <asm/cacheflush.h>
+
+#define DRV_NAME "bfin-sport-spi"
+#define DRV_DESC "SPI bus via the Blackfin SPORT"
+
+MODULE_AUTHOR("Cliff Cai");
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:bfin-sport-spi");
+
+enum bfin_sport_spi_state {
+ START_STATE,
+ RUNNING_STATE,
+ DONE_STATE,
+ ERROR_STATE,
+};
+
+struct bfin_sport_spi_master_data;
+
+struct bfin_sport_transfer_ops {
+ void (*write) (struct bfin_sport_spi_master_data *);
+ void (*read) (struct bfin_sport_spi_master_data *);
+ void (*duplex) (struct bfin_sport_spi_master_data *);
+};
+
+struct bfin_sport_spi_master_data {
+ /* Driver model hookup */
+ struct device *dev;
+
+ /* SPI framework hookup */
+ struct spi_master *master;
+
+ /* Regs base of SPI controller */
+ struct sport_register __iomem *regs;
+ int err_irq;
+
+ /* Pin request list */
+ u16 *pin_req;
+
+ /* Driver message queue */
+ struct workqueue_struct *workqueue;
+ struct work_struct pump_messages;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ bool run;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ enum bfin_sport_spi_state state;
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct bfin_sport_spi_slave_data *cur_chip;
+ union {
+ void *tx;
+ u8 *tx8;
+ u16 *tx16;
+ };
+ void *tx_end;
+ union {
+ void *rx;
+ u8 *rx8;
+ u16 *rx16;
+ };
+ void *rx_end;
+
+ int cs_change;
+ struct bfin_sport_transfer_ops *ops;
+};
+
+struct bfin_sport_spi_slave_data {
+ u16 ctl_reg;
+ u16 baud;
+ u16 cs_chg_udelay; /* Some devices require > 255usec delay */
+ u32 cs_gpio;
+ u16 idle_tx_val;
+ struct bfin_sport_transfer_ops *ops;
+};
+
+static void
+bfin_sport_spi_enable(struct bfin_sport_spi_master_data *drv_data)
+{
+ bfin_write_or(&drv_data->regs->tcr1, TSPEN);
+ bfin_write_or(&drv_data->regs->rcr1, TSPEN);
+ SSYNC();
+}
+
+static void
+bfin_sport_spi_disable(struct bfin_sport_spi_master_data *drv_data)
+{
+ bfin_write_and(&drv_data->regs->tcr1, ~TSPEN);
+ bfin_write_and(&drv_data->regs->rcr1, ~TSPEN);
+ SSYNC();
+}
+
+/* Calculate the SPI_BAUD register value based on input HZ */
+static u16
+bfin_sport_hz_to_spi_baud(u32 speed_hz)
+{
+ u_long clk, sclk = get_sclk();
+ int div = (sclk / (2 * speed_hz)) - 1;
+
+ if (div < 0)
+ div = 0;
+
+ clk = sclk / (2 * (div + 1));
+
+ if (clk > speed_hz)
+ div++;
+
+ return div;
+}
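+
+/*
+ * Worked example (assuming a hypothetical 133 MHz sclk): for a 10 MHz
+ * request, div = 133000000 / 20000000 - 1 = 5, which would clock the bus
+ * at 133 MHz / 12 ~= 11.1 MHz, above the request; div is therefore bumped
+ * to 6 and the SPORT runs at 133 MHz / 14 ~= 9.5 MHz, never faster than
+ * asked for.
+ */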
+
+/* Chip select operation functions for cs_change flag */
+static void
+bfin_sport_spi_cs_active(struct bfin_sport_spi_slave_data *chip)
+{
+ gpio_direction_output(chip->cs_gpio, 0);
+}
+
+static void
+bfin_sport_spi_cs_deactive(struct bfin_sport_spi_slave_data *chip)
+{
+ gpio_direction_output(chip->cs_gpio, 1);
+ /* Move delay here for consistency */
+ if (chip->cs_chg_udelay)
+ udelay(chip->cs_chg_udelay);
+}
+
+static void
+bfin_sport_spi_stat_poll_complete(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long timeout = jiffies + HZ;
+ while (!(bfin_read(&drv_data->regs->stat) & RXNE)) {
+ if (!time_before(jiffies, timeout))
+ break;
+ }
+}
+
+static void
+bfin_sport_spi_u8_writer(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 dummy;
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ dummy = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u8_reader(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, tx_val);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u8_duplex(struct bfin_sport_spi_master_data *drv_data)
+{
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx8++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx8++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u8 = {
+ .write = bfin_sport_spi_u8_writer,
+ .read = bfin_sport_spi_u8_reader,
+ .duplex = bfin_sport_spi_u8_duplex,
+};
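+
+/*
+ * Note that even the u8 ops go through the 16-bit SPORT data registers;
+ * the word length actually shifted out on the wire is programmed
+ * separately via tcr2/rcr2 in the transfer pump.
+ */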
+
+static void
+bfin_sport_spi_u16_writer(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 dummy;
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ dummy = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u16_reader(struct bfin_sport_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, tx_val);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static void
+bfin_sport_spi_u16_duplex(struct bfin_sport_spi_master_data *drv_data)
+{
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tx16, *drv_data->tx16++);
+ bfin_sport_spi_stat_poll_complete(drv_data);
+ *drv_data->rx16++ = bfin_read(&drv_data->regs->rx16);
+ }
+}
+
+static struct bfin_sport_transfer_ops bfin_sport_transfer_ops_u16 = {
+ .write = bfin_sport_spi_u16_writer,
+ .read = bfin_sport_spi_u16_reader,
+ .duplex = bfin_sport_spi_u16_duplex,
+};
+
+/* stop controller and re-config current chip */
+static void
+bfin_sport_spi_restore_state(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
+
+ bfin_sport_spi_disable(drv_data);
+ dev_dbg(drv_data->dev, "restoring spi ctl state\n");
+
+ bfin_write(&drv_data->regs->tcr1, chip->ctl_reg);
+ bfin_write(&drv_data->regs->tclkdiv, chip->baud);
+ SSYNC();
+
+ bfin_write(&drv_data->regs->rcr1, chip->ctl_reg & ~(ITCLK | ITFS));
+ SSYNC();
+
+ bfin_sport_spi_cs_active(chip);
+}
+
+/* test if there are more transfers to be done */
+static enum bfin_sport_spi_state
+bfin_sport_spi_next_transfer(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct spi_transfer *trans = drv_data->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ drv_data->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ return RUNNING_STATE;
+ }
+
+ return DONE_STATE;
+}
+
+/*
+ * caller already set message->status;
+ * dma and pio irqs are blocked, so give the finished message back
+ */
+static void
+bfin_sport_spi_giveback(struct bfin_sport_spi_master_data *drv_data)
+{
+ struct bfin_sport_spi_slave_data *chip = drv_data->cur_chip;
+ unsigned long flags;
+ struct spi_message *msg;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+ msg = drv_data->cur_msg;
+ drv_data->state = START_STATE;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ if (!drv_data->cs_change)
+ bfin_sport_spi_cs_deactive(chip);
+
+ if (msg->complete)
+ msg->complete(msg->context);
+}
+
+static irqreturn_t
+sport_err_handler(int irq, void *dev_id)
+{
+ struct bfin_sport_spi_master_data *drv_data = dev_id;
+ u16 status;
+
+ dev_dbg(drv_data->dev, "%s enter\n", __func__);
+ status = bfin_read(&drv_data->regs->stat) & (TOVF | TUVF | ROVF | RUVF);
+
+ if (status) {
+ bfin_write(&drv_data->regs->stat, status);
+ SSYNC();
+
+ bfin_sport_spi_disable(drv_data);
+ dev_err(drv_data->dev, "status error:%s%s%s%s\n",
+ status & TOVF ? " TOVF" : "",
+ status & TUVF ? " TUVF" : "",
+ status & ROVF ? " ROVF" : "",
+ status & RUVF ? " RUVF" : "");
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void
+bfin_sport_spi_pump_transfers(unsigned long data)
+{
+ struct bfin_sport_spi_master_data *drv_data = (void *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct bfin_sport_spi_slave_data *chip = NULL;
+ unsigned int bits_per_word;
+ u32 tranf_success = 1;
+ u32 transfer_speed;
+ u8 full_duplex = 0;
+
+ /* Get current state information */
+ message = drv_data->cur_msg;
+ transfer = drv_data->cur_transfer;
+ chip = drv_data->cur_chip;
+
+ if (transfer->speed_hz)
+ transfer_speed = bfin_sport_hz_to_spi_baud(transfer->speed_hz);
+ else
+ transfer_speed = chip->baud;
+ bfin_write(&drv_data->regs->tclkdiv, transfer_speed);
+ SSYNC();
+
+ /*
+ * if msg is error or done, report it back using complete() callback
+ */
+
+ /* Handle for abort */
+ if (drv_data->state == ERROR_STATE) {
+ dev_dbg(drv_data->dev, "transfer: we've hit an error\n");
+ message->status = -EIO;
+ bfin_sport_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Handle end of message */
+ if (drv_data->state == DONE_STATE) {
+ dev_dbg(drv_data->dev, "transfer: all done!\n");
+ message->status = 0;
+ bfin_sport_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Delay if requested at end of transfer */
+ if (drv_data->state == RUNNING_STATE) {
+ dev_dbg(drv_data->dev, "transfer: still running ...\n");
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer, transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ }
+
+	if (transfer->len == 0) {
+		/* Move to next transfer of this msg */
+		drv_data->state = bfin_sport_spi_next_transfer(drv_data);
+		/* Schedule next transfer tasklet */
+		tasklet_schedule(&drv_data->pump_transfers);
+		return;
+	}
+
+ if (transfer->tx_buf != NULL) {
+ drv_data->tx = (void *)transfer->tx_buf;
+ drv_data->tx_end = drv_data->tx + transfer->len;
+ dev_dbg(drv_data->dev, "tx_buf is %p, tx_end is %p\n",
+ transfer->tx_buf, drv_data->tx_end);
+ } else
+ drv_data->tx = NULL;
+
+ if (transfer->rx_buf != NULL) {
+ full_duplex = transfer->tx_buf != NULL;
+ drv_data->rx = transfer->rx_buf;
+ drv_data->rx_end = drv_data->rx + transfer->len;
+ dev_dbg(drv_data->dev, "rx_buf is %p, rx_end is %p\n",
+ transfer->rx_buf, drv_data->rx_end);
+ } else
+ drv_data->rx = NULL;
+
+ drv_data->cs_change = transfer->cs_change;
+
+ /* Bits per word setup */
+ bits_per_word = transfer->bits_per_word;
+ if (bits_per_word == 16)
+ drv_data->ops = &bfin_sport_transfer_ops_u16;
+ else
+ drv_data->ops = &bfin_sport_transfer_ops_u8;
+ bfin_write(&drv_data->regs->tcr2, bits_per_word - 1);
+ bfin_write(&drv_data->regs->tfsdiv, bits_per_word - 1);
+ bfin_write(&drv_data->regs->rcr2, bits_per_word - 1);
+
+ drv_data->state = RUNNING_STATE;
+
+ if (drv_data->cs_change)
+ bfin_sport_spi_cs_active(chip);
+
+ dev_dbg(drv_data->dev,
+ "now pumping a transfer: width is %d, len is %d\n",
+ bits_per_word, transfer->len);
+
+ /* PIO mode write then read */
+ dev_dbg(drv_data->dev, "doing IO transfer\n");
+
+ bfin_sport_spi_enable(drv_data);
+ if (full_duplex) {
+ /* full duplex mode */
+ BUG_ON((drv_data->tx_end - drv_data->tx) !=
+ (drv_data->rx_end - drv_data->rx));
+ drv_data->ops->duplex(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+ tranf_success = 0;
+ } else if (drv_data->tx != NULL) {
+ /* write only half duplex */
+
+ drv_data->ops->write(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+ tranf_success = 0;
+ } else if (drv_data->rx != NULL) {
+ /* read only half duplex */
+
+ drv_data->ops->read(drv_data);
+ if (drv_data->rx != drv_data->rx_end)
+ tranf_success = 0;
+ }
+ bfin_sport_spi_disable(drv_data);
+
+ if (!tranf_success) {
+ dev_dbg(drv_data->dev, "IO write error!\n");
+ drv_data->state = ERROR_STATE;
+ } else {
+ /* Update total byte transferred */
+ message->actual_length += transfer->len;
+ /* Move to next transfer of this msg */
+ drv_data->state = bfin_sport_spi_next_transfer(drv_data);
+ if (drv_data->cs_change)
+ bfin_sport_spi_cs_deactive(chip);
+ }
+
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+/* pop a msg from queue and kick off real transfer */
+static void
+bfin_sport_spi_pump_messages(struct work_struct *work)
+{
+ struct bfin_sport_spi_master_data *drv_data;
+ unsigned long flags;
+ struct spi_message *next_msg;
+
+ drv_data = container_of(work, struct bfin_sport_spi_master_data, pump_messages);
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&drv_data->lock, flags);
+ if (list_empty(&drv_data->queue) || !drv_data->run) {
+ /* pumper kicked off but no work to do */
+ drv_data->busy = 0;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Make sure we are not already running a message */
+ if (drv_data->cur_msg) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Extract head of queue */
+ next_msg = list_entry(drv_data->queue.next,
+ struct spi_message, queue);
+
+ drv_data->cur_msg = next_msg;
+
+ /* Setup the SSP using the per chip configuration */
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+
+ list_del_init(&drv_data->cur_msg->queue);
+
+ /* Initialize message state */
+ drv_data->cur_msg->state = START_STATE;
+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+ bfin_sport_spi_restore_state(drv_data);
+ dev_dbg(drv_data->dev, "got a message to pump, "
+ "state is set to: baud %d, cs_gpio %i, ctl 0x%x\n",
+ drv_data->cur_chip->baud, drv_data->cur_chip->cs_gpio,
+ drv_data->cur_chip->ctl_reg);
+
+ dev_dbg(drv_data->dev,
+ "the first transfer len is %d\n",
+ drv_data->cur_transfer->len);
+
+ /* Mark as busy and launch transfers */
+ tasklet_schedule(&drv_data->pump_transfers);
+
+ drv_data->busy = 1;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+}
+
+/*
+ * got a msg to transfer, queue it in drv_data->queue
+ * and kick off the message pumper
+ */
+static int
+bfin_sport_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct bfin_sport_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (!drv_data->run) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = START_STATE;
+
+	dev_dbg(&spi->dev, "adding a msg in transfer()\n");
+ list_add_tail(&msg->queue, &drv_data->queue);
+
+ if (drv_data->run && !drv_data->busy)
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return 0;
+}
+
+/* Called every time common spi devices change state */
+static int
+bfin_sport_spi_setup(struct spi_device *spi)
+{
+ struct bfin_sport_spi_slave_data *chip, *first = NULL;
+ int ret;
+
+ /* Only alloc (or use chip_info) on first setup */
+ chip = spi_get_ctldata(spi);
+ if (chip == NULL) {
+ struct bfin5xx_spi_chip *chip_info;
+
+ chip = first = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ /* platform chip_info isn't required */
+ chip_info = spi->controller_data;
+ if (chip_info) {
+			/*
+			 * DITFS and TDTYPE are the only things we don't set,
+			 * but they probably shouldn't be changed by people.
+			 */
+ if (chip_info->ctl_reg || chip_info->enable_dma) {
+ ret = -EINVAL;
+ dev_err(&spi->dev, "don't set ctl_reg/enable_dma fields\n");
+ goto error;
+ }
+ chip->cs_chg_udelay = chip_info->cs_chg_udelay;
+ chip->idle_tx_val = chip_info->idle_tx_val;
+ }
+ }
+
+	/* translate the common spi framework settings into our registers;
+	 * the following configuration is the same for tx and rx.
+	 */
+
+ if (spi->mode & SPI_CPHA)
+ chip->ctl_reg &= ~TCKFE;
+ else
+ chip->ctl_reg |= TCKFE;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ chip->ctl_reg |= TLSBIT;
+ else
+ chip->ctl_reg &= ~TLSBIT;
+
+ /* Sport in master mode */
+ chip->ctl_reg |= ITCLK | ITFS | TFSR | LATFS | LTFS;
+
+ chip->baud = bfin_sport_hz_to_spi_baud(spi->max_speed_hz);
+
+ chip->cs_gpio = spi->chip_select;
+ ret = gpio_request(chip->cs_gpio, spi->modalias);
+ if (ret)
+ goto error;
+
+ dev_dbg(&spi->dev, "setup spi chip %s, width is %d\n",
+ spi->modalias, spi->bits_per_word);
+ dev_dbg(&spi->dev, "ctl_reg is 0x%x, GPIO is %i\n",
+ chip->ctl_reg, spi->chip_select);
+
+ spi_set_ctldata(spi, chip);
+
+ bfin_sport_spi_cs_deactive(chip);
+
+ return ret;
+
+ error:
+ kfree(first);
+ return ret;
+}
+
+/*
+ * callback for spi framework.
+ * clean driver specific data
+ */
+static void
+bfin_sport_spi_cleanup(struct spi_device *spi)
+{
+ struct bfin_sport_spi_slave_data *chip = spi_get_ctldata(spi);
+
+ if (!chip)
+ return;
+
+ gpio_free(chip->cs_gpio);
+
+ kfree(chip);
+}
+
+static int
+bfin_sport_spi_init_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ INIT_LIST_HEAD(&drv_data->queue);
+ spin_lock_init(&drv_data->lock);
+
+ drv_data->run = false;
+ drv_data->busy = 0;
+
+ /* init transfer tasklet */
+ tasklet_init(&drv_data->pump_transfers,
+ bfin_sport_spi_pump_transfers, (unsigned long)drv_data);
+
+ /* init messages workqueue */
+ INIT_WORK(&drv_data->pump_messages, bfin_sport_spi_pump_messages);
+ drv_data->workqueue =
+ create_singlethread_workqueue(dev_name(drv_data->master->dev.parent));
+ if (drv_data->workqueue == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int
+bfin_sport_spi_start_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (drv_data->run || drv_data->busy) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -EBUSY;
+ }
+
+ drv_data->run = true;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ return 0;
+}
+
+static inline int
+bfin_sport_spi_stop_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int status = 0;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ /*
+ * This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the drv_data->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+	 * friends on every SPI message. Do this instead.
+ */
+ drv_data->run = false;
+ while (!list_empty(&drv_data->queue) && drv_data->busy && limit--) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&drv_data->lock, flags);
+ }
+
+ if (!list_empty(&drv_data->queue) || drv_data->busy)
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return status;
+}
+
+static inline int
+bfin_sport_spi_destroy_queue(struct bfin_sport_spi_master_data *drv_data)
+{
+ int status;
+
+ status = bfin_sport_spi_stop_queue(drv_data);
+ if (status)
+ return status;
+
+ destroy_workqueue(drv_data->workqueue);
+
+ return 0;
+}
+
+static int bfin_sport_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bfin5xx_spi_master *platform_info;
+ struct spi_master *master;
+ struct resource *res, *ires;
+ struct bfin_sport_spi_master_data *drv_data;
+ int status;
+
+ platform_info = dev_get_platdata(dev);
+
+ /* Allocate master with space for drv_data */
+	master = spi_alloc_master(dev, sizeof(*drv_data));
+ if (!master) {
+ dev_err(dev, "cannot alloc spi_master\n");
+ return -ENOMEM;
+ }
+
+ drv_data = spi_master_get_devdata(master);
+ drv_data->master = master;
+ drv_data->dev = dev;
+ drv_data->pin_req = platform_info->pin_req;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->bus_num = pdev->id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->cleanup = bfin_sport_spi_cleanup;
+ master->setup = bfin_sport_spi_setup;
+ master->transfer = bfin_sport_spi_transfer;
+
+ /* Find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "cannot get IORESOURCE_MEM\n");
+ status = -ENOENT;
+ goto out_error_get_res;
+ }
+
+ drv_data->regs = ioremap(res->start, resource_size(res));
+ if (drv_data->regs == NULL) {
+ dev_err(dev, "cannot map registers\n");
+ status = -ENXIO;
+ goto out_error_ioremap;
+ }
+
+ ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!ires) {
+ dev_err(dev, "cannot get IORESOURCE_IRQ\n");
+ status = -ENODEV;
+ goto out_error_get_ires;
+ }
+ drv_data->err_irq = ires->start;
+
+	/* Initialize and start the queue */
+ status = bfin_sport_spi_init_queue(drv_data);
+ if (status) {
+ dev_err(dev, "problem initializing queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = bfin_sport_spi_start_queue(drv_data);
+ if (status) {
+ dev_err(dev, "problem starting queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = request_irq(drv_data->err_irq, sport_err_handler,
+ 0, "sport_spi_err", drv_data);
+ if (status) {
+ dev_err(dev, "unable to request sport err irq\n");
+ goto out_error_irq;
+ }
+
+ status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
+ if (status) {
+ dev_err(dev, "requesting peripherals failed\n");
+ goto out_error_peripheral;
+ }
+
+ /* Register with the SPI framework */
+ platform_set_drvdata(pdev, drv_data);
+ status = spi_register_master(master);
+ if (status) {
+ dev_err(dev, "problem registering spi master\n");
+ goto out_error_master;
+ }
+
+ dev_info(dev, "%s, regs_base@%p\n", DRV_DESC, drv_data->regs);
+ return 0;
+
+ out_error_master:
+ peripheral_free_list(drv_data->pin_req);
+ out_error_peripheral:
+ free_irq(drv_data->err_irq, drv_data);
+ out_error_irq:
+ out_error_queue_alloc:
+ bfin_sport_spi_destroy_queue(drv_data);
+ out_error_get_ires:
+ iounmap(drv_data->regs);
+ out_error_ioremap:
+ out_error_get_res:
+ spi_master_put(master);
+
+ return status;
+}
+
+/* stop hardware and remove the driver */
+static int bfin_sport_spi_remove(struct platform_device *pdev)
+{
+ struct bfin_sport_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status = 0;
+
+ if (!drv_data)
+ return 0;
+
+ /* Remove the queue */
+ status = bfin_sport_spi_destroy_queue(drv_data);
+ if (status)
+ return status;
+
+	/* Disable the SPORT at the peripheral and SOC level */
+ bfin_sport_spi_disable(drv_data);
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(drv_data->master);
+
+ peripheral_free_list(drv_data->pin_req);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bfin_sport_spi_suspend(struct device *dev)
+{
+ struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
+ int status;
+
+ status = bfin_sport_spi_stop_queue(drv_data);
+ if (status)
+ return status;
+
+ /* stop hardware */
+ bfin_sport_spi_disable(drv_data);
+
+ return status;
+}
+
+static int bfin_sport_spi_resume(struct device *dev)
+{
+ struct bfin_sport_spi_master_data *drv_data = dev_get_drvdata(dev);
+ int status;
+
+ /* Enable the SPI interface */
+ bfin_sport_spi_enable(drv_data);
+
+ /* Start the queue running */
+ status = bfin_sport_spi_start_queue(drv_data);
+ if (status)
+ dev_err(drv_data->dev, "problem resuming queue\n");
+
+ return status;
+}
+
+static SIMPLE_DEV_PM_OPS(bfin_sport_spi_pm_ops, bfin_sport_spi_suspend,
+ bfin_sport_spi_resume);
+
+#define BFIN_SPORT_SPI_PM_OPS (&bfin_sport_spi_pm_ops)
+#else
+#define BFIN_SPORT_SPI_PM_OPS NULL
+#endif
+
+static struct platform_driver bfin_sport_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = BFIN_SPORT_SPI_PM_OPS,
+ },
+ .probe = bfin_sport_spi_probe,
+ .remove = bfin_sport_spi_remove,
+};
+module_platform_driver(bfin_sport_spi_driver);
diff --git a/drivers/spi/spi-bfin5xx.c b/drivers/spi/spi-bfin5xx.c
new file mode 100644
index 000000000..a3d65b4f4
--- /dev/null
+++ b/drivers/spi/spi-bfin5xx.c
@@ -0,0 +1,1473 @@
+/*
+ * Blackfin On-Chip SPI Driver
+ *
+ * Copyright 2004-2010 Analog Devices Inc.
+ *
+ * Enter bugs at http://blackfin.uclinux.org/
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <linux/ioport.h>
+#include <linux/irq.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/dma-mapping.h>
+#include <linux/spi/spi.h>
+#include <linux/workqueue.h>
+
+#include <asm/dma.h>
+#include <asm/portmux.h>
+#include <asm/bfin5xx_spi.h>
+#include <asm/cacheflush.h>
+
+#define DRV_NAME "bfin-spi"
+#define DRV_AUTHOR "Bryan Wu, Luke Yang"
+#define DRV_DESC "Blackfin on-chip SPI Controller Driver"
+#define DRV_VERSION "1.0"
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_LICENSE("GPL");
+
+#define START_STATE ((void *)0)
+#define RUNNING_STATE ((void *)1)
+#define DONE_STATE ((void *)2)
+#define ERROR_STATE ((void *)-1)
+
+struct bfin_spi_master_data;
+
+struct bfin_spi_transfer_ops {
+ void (*write) (struct bfin_spi_master_data *);
+ void (*read) (struct bfin_spi_master_data *);
+ void (*duplex) (struct bfin_spi_master_data *);
+};
+
+struct bfin_spi_master_data {
+ /* Driver model hookup */
+ struct platform_device *pdev;
+
+ /* SPI framework hookup */
+ struct spi_master *master;
+
+ /* Regs base of SPI controller */
+ struct bfin_spi_regs __iomem *regs;
+
+ /* Pin request list */
+ u16 *pin_req;
+
+ /* BFIN hookup */
+ struct bfin5xx_spi_master *master_info;
+
+ /* Driver message queue */
+ struct workqueue_struct *workqueue;
+ struct work_struct pump_messages;
+ spinlock_t lock;
+ struct list_head queue;
+ int busy;
+ bool running;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* Current message transfer state info */
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct bfin_spi_slave_data *cur_chip;
+ size_t len_in_bytes;
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+
+	/* DMA stuff */
+ int dma_channel;
+ int dma_mapped;
+ int dma_requested;
+ dma_addr_t rx_dma;
+ dma_addr_t tx_dma;
+
+ int irq_requested;
+ int spi_irq;
+
+ size_t rx_map_len;
+ size_t tx_map_len;
+ u8 n_bytes;
+ u16 ctrl_reg;
+ u16 flag_reg;
+
+ int cs_change;
+ const struct bfin_spi_transfer_ops *ops;
+};
+
+struct bfin_spi_slave_data {
+ u16 ctl_reg;
+ u16 baud;
+ u16 flag;
+
+ u8 chip_select_num;
+ u8 enable_dma;
+ u16 cs_chg_udelay; /* Some devices require > 255usec delay */
+ u32 cs_gpio;
+ u16 idle_tx_val;
+ u8 pio_interrupt; /* use spi data irq */
+ const struct bfin_spi_transfer_ops *ops;
+};
+
+static void bfin_spi_enable(struct bfin_spi_master_data *drv_data)
+{
+ bfin_write_or(&drv_data->regs->ctl, BIT_CTL_ENABLE);
+}
+
+static void bfin_spi_disable(struct bfin_spi_master_data *drv_data)
+{
+ bfin_write_and(&drv_data->regs->ctl, ~BIT_CTL_ENABLE);
+}
+
+/* Calculate the SPI_BAUD register value based on input HZ */
+static u16 hz_to_spi_baud(u32 speed_hz)
+{
+ u_long sclk = get_sclk();
+ u16 spi_baud = (sclk / (2 * speed_hz));
+
+ if ((sclk % (2 * speed_hz)) > 0)
+ spi_baud++;
+
+ if (spi_baud < MIN_SPI_BAUD_VAL)
+ spi_baud = MIN_SPI_BAUD_VAL;
+
+ return spi_baud;
+}
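+
+/*
+ * Worked example (assuming a hypothetical 100 MHz sclk): an 8 MHz request
+ * gives 100000000 / 16000000 = 6 with a remainder, so spi_baud is rounded
+ * up to 7 and the bus runs at 100 MHz / 14 ~= 7.1 MHz, never faster than
+ * requested.
+ */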
+
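+/*
+ * bfin_spi_flush() returns the remaining poll budget, so a return value of
+ * 0 means the controller never signalled SPIF in time; callers treat 0 as
+ * a flush failure.
+ */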
+static int bfin_spi_flush(struct bfin_spi_master_data *drv_data)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ /* wait for stop and clear stat */
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF) && --limit)
+ cpu_relax();
+
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
+
+ return limit;
+}
+
+/* Chip select operation functions for cs_change flag */
+static void bfin_spi_cs_active(struct bfin_spi_master_data *drv_data, struct bfin_spi_slave_data *chip)
+{
+ if (likely(chip->chip_select_num < MAX_CTRL_CS))
+ bfin_write_and(&drv_data->regs->flg, ~chip->flag);
+ else
+ gpio_set_value(chip->cs_gpio, 0);
+}
+
+static void bfin_spi_cs_deactive(struct bfin_spi_master_data *drv_data,
+ struct bfin_spi_slave_data *chip)
+{
+ if (likely(chip->chip_select_num < MAX_CTRL_CS))
+ bfin_write_or(&drv_data->regs->flg, chip->flag);
+ else
+ gpio_set_value(chip->cs_gpio, 1);
+
+ /* Move delay here for consistency */
+ if (chip->cs_chg_udelay)
+ udelay(chip->cs_chg_udelay);
+}
+
+/* enable or disable the pin muxed by GPIO and SPI CS to work as SPI CS */
+static inline void bfin_spi_cs_enable(struct bfin_spi_master_data *drv_data,
+ struct bfin_spi_slave_data *chip)
+{
+ if (chip->chip_select_num < MAX_CTRL_CS)
+ bfin_write_or(&drv_data->regs->flg, chip->flag >> 8);
+}
+
+static inline void bfin_spi_cs_disable(struct bfin_spi_master_data *drv_data,
+ struct bfin_spi_slave_data *chip)
+{
+ if (chip->chip_select_num < MAX_CTRL_CS)
+ bfin_write_and(&drv_data->regs->flg, ~(chip->flag >> 8));
+}
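+
+/*
+ * chip->flag appears to pack the FLG value bit in its upper byte, so
+ * shifting right by 8 (as above) yields the matching FLS enable bit in
+ * the lower byte of the same register.
+ */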
+
+/* stop controller and re-config current chip */
+static void bfin_spi_restore_state(struct bfin_spi_master_data *drv_data)
+{
+ struct bfin_spi_slave_data *chip = drv_data->cur_chip;
+
+ /* Clear status and disable clock */
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
+ bfin_spi_disable(drv_data);
+ dev_dbg(&drv_data->pdev->dev, "restoring spi ctl state\n");
+
+ SSYNC();
+
+ /* Load the registers */
+ bfin_write(&drv_data->regs->ctl, chip->ctl_reg);
+ bfin_write(&drv_data->regs->baud, chip->baud);
+
+ bfin_spi_enable(drv_data);
+ bfin_spi_cs_active(drv_data, chip);
+}
+
+/* used to kick off transfer in rx mode and read unwanted RX data */
+static inline void bfin_spi_dummy_read(struct bfin_spi_master_data *drv_data)
+{
+ (void) bfin_read(&drv_data->regs->rdbr);
+}
+
+static void bfin_spi_u8_writer(struct bfin_spi_master_data *drv_data)
+{
+ /* clear RXS (we check for RXS inside the loop) */
+ bfin_spi_dummy_read(drv_data);
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++)));
+		/*
+		 * wait until transfer finished; checking SPIF or TXS may not
+		 * guarantee transfer completion
+		 */
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
+ cpu_relax();
+ /* discard RX data and clear RXS */
+ bfin_spi_dummy_read(drv_data);
+ }
+}
+
+static void bfin_spi_u8_reader(struct bfin_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ /* discard old RX data and clear RXS */
+ bfin_spi_dummy_read(drv_data);
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tdbr, tx_val);
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr);
+ }
+}
+
+static void bfin_spi_u8_duplex(struct bfin_spi_master_data *drv_data)
+{
+ /* discard old RX data and clear RXS */
+ bfin_spi_dummy_read(drv_data);
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tdbr, (*(u8 *) (drv_data->tx++)));
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u8 *) (drv_data->rx++) = bfin_read(&drv_data->regs->rdbr);
+ }
+}
+
+static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u8 = {
+ .write = bfin_spi_u8_writer,
+ .read = bfin_spi_u8_reader,
+ .duplex = bfin_spi_u8_duplex,
+};
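+
+/*
+ * The same TDBR/RDBR registers serve both widths; BIT_CTL_WORDSIZE in the
+ * ctl register (set in the transfer pump) selects 8- vs 16-bit framing.
+ */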
+
+static void bfin_spi_u16_writer(struct bfin_spi_master_data *drv_data)
+{
+ /* clear RXS (we check for RXS inside the loop) */
+ bfin_spi_dummy_read(drv_data);
+
+ while (drv_data->tx < drv_data->tx_end) {
+ bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx)));
+ drv_data->tx += 2;
+		/*
+		 * wait until transfer finished; checking SPIF or TXS may not
+		 * guarantee transfer completion
+		 */
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
+ cpu_relax();
+ /* discard RX data and clear RXS */
+ bfin_spi_dummy_read(drv_data);
+ }
+}
+
+static void bfin_spi_u16_reader(struct bfin_spi_master_data *drv_data)
+{
+ u16 tx_val = drv_data->cur_chip->idle_tx_val;
+
+ /* discard old RX data and clear RXS */
+ bfin_spi_dummy_read(drv_data);
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tdbr, tx_val);
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr);
+ drv_data->rx += 2;
+ }
+}
+
+static void bfin_spi_u16_duplex(struct bfin_spi_master_data *drv_data)
+{
+ /* discard old RX data and clear RXS */
+ bfin_spi_dummy_read(drv_data);
+
+ while (drv_data->rx < drv_data->rx_end) {
+ bfin_write(&drv_data->regs->tdbr, (*(u16 *) (drv_data->tx)));
+ drv_data->tx += 2;
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
+ cpu_relax();
+ *(u16 *) (drv_data->rx) = bfin_read(&drv_data->regs->rdbr);
+ drv_data->rx += 2;
+ }
+}
+
+static const struct bfin_spi_transfer_ops bfin_bfin_spi_transfer_ops_u16 = {
+ .write = bfin_spi_u16_writer,
+ .read = bfin_spi_u16_reader,
+ .duplex = bfin_spi_u16_duplex,
+};
+
+/* test if there are more transfers to be done */
+static void *bfin_spi_next_transfer(struct bfin_spi_master_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct spi_transfer *trans = drv_data->cur_transfer;
+
+	/* Move to next transfer */
+	if (trans->transfer_list.next != &msg->transfers) {
+		drv_data->cur_transfer =
+		    list_entry(trans->transfer_list.next,
+			       struct spi_transfer, transfer_list);
+		return RUNNING_STATE;
+	}
+
+	return DONE_STATE;
+}
+
+/*
+ * caller already set message->status;
+ * dma and pio irqs are blocked, so give the finished message back
+ */
+static void bfin_spi_giveback(struct bfin_spi_master_data *drv_data)
+{
+ struct bfin_spi_slave_data *chip = drv_data->cur_chip;
+ unsigned long flags;
+ struct spi_message *msg;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+ msg = drv_data->cur_msg;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ msg->state = NULL;
+
+ if (!drv_data->cs_change)
+ bfin_spi_cs_deactive(drv_data, chip);
+
+	/* Don't stop spi in autobuffer mode */
+ if (drv_data->tx_dma != 0xFFFF)
+ bfin_spi_disable(drv_data);
+
+ if (msg->complete)
+ msg->complete(msg->context);
+}
+
+/* spi data irq handler */
+static irqreturn_t bfin_spi_pio_irq_handler(int irq, void *dev_id)
+{
+ struct bfin_spi_master_data *drv_data = dev_id;
+ struct bfin_spi_slave_data *chip = drv_data->cur_chip;
+ struct spi_message *msg = drv_data->cur_msg;
+ int n_bytes = drv_data->n_bytes;
+ int loop = 0;
+
+ /* wait until transfer finished. */
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_RXS))
+ cpu_relax();
+
+ if ((drv_data->tx && drv_data->tx >= drv_data->tx_end) ||
+ (drv_data->rx && drv_data->rx >= (drv_data->rx_end - n_bytes))) {
+ /* last read */
+ if (drv_data->rx) {
+ dev_dbg(&drv_data->pdev->dev, "last read\n");
+ if (!(n_bytes % 2)) {
+ u16 *buf = (u16 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes / 2; loop++)
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ } else {
+ u8 *buf = (u8 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes; loop++)
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ }
+ drv_data->rx += n_bytes;
+ }
+
+ msg->actual_length += drv_data->len_in_bytes;
+ if (drv_data->cs_change)
+ bfin_spi_cs_deactive(drv_data, chip);
+ /* Move to next transfer */
+ msg->state = bfin_spi_next_transfer(drv_data);
+
+ disable_irq_nosync(drv_data->spi_irq);
+
+ /* Schedule transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+ return IRQ_HANDLED;
+ }
+
+ if (drv_data->rx && drv_data->tx) {
+ /* duplex */
+ dev_dbg(&drv_data->pdev->dev, "duplex: write_TDBR\n");
+ if (!(n_bytes % 2)) {
+ u16 *buf = (u16 *)drv_data->rx;
+ u16 *buf2 = (u16 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes / 2; loop++) {
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf2++);
+ }
+ } else {
+ u8 *buf = (u8 *)drv_data->rx;
+ u8 *buf2 = (u8 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes; loop++) {
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf2++);
+ }
+ }
+ } else if (drv_data->rx) {
+ /* read */
+ dev_dbg(&drv_data->pdev->dev, "read: write_TDBR\n");
+ if (!(n_bytes % 2)) {
+ u16 *buf = (u16 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes / 2; loop++) {
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
+ }
+ } else {
+ u8 *buf = (u8 *)drv_data->rx;
+ for (loop = 0; loop < n_bytes; loop++) {
+ *buf++ = bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
+ }
+ }
+ } else if (drv_data->tx) {
+ /* write */
+ dev_dbg(&drv_data->pdev->dev, "write: write_TDBR\n");
+ if (!(n_bytes % 2)) {
+ u16 *buf = (u16 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes / 2; loop++) {
+ bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf++);
+ }
+ } else {
+ u8 *buf = (u8 *)drv_data->tx;
+ for (loop = 0; loop < n_bytes; loop++) {
+ bfin_read(&drv_data->regs->rdbr);
+ bfin_write(&drv_data->regs->tdbr, *buf++);
+ }
+ }
+ }
+
+ if (drv_data->tx)
+ drv_data->tx += n_bytes;
+ if (drv_data->rx)
+ drv_data->rx += n_bytes;
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t bfin_spi_dma_irq_handler(int irq, void *dev_id)
+{
+ struct bfin_spi_master_data *drv_data = dev_id;
+ struct bfin_spi_slave_data *chip = drv_data->cur_chip;
+ struct spi_message *msg = drv_data->cur_msg;
+ unsigned long timeout;
+ unsigned short dmastat = get_dma_curr_irqstat(drv_data->dma_channel);
+ u16 spistat = bfin_read(&drv_data->regs->stat);
+
+ dev_dbg(&drv_data->pdev->dev,
+ "in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
+ dmastat, spistat);
+
+ if (drv_data->rx != NULL) {
+ u16 cr = bfin_read(&drv_data->regs->ctl);
+ /* discard old RX data and clear RXS */
+ bfin_spi_dummy_read(drv_data);
+ bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_ENABLE); /* Disable SPI */
+ bfin_write(&drv_data->regs->ctl, cr & ~BIT_CTL_TIMOD); /* Restore State */
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR); /* Clear Status */
+ }
+
+ clear_dma_irqstat(drv_data->dma_channel);
+
+ /*
+	 * wait for the last transaction to be shifted out. The HRM states:
+ * at this point there may still be data in the SPI DMA FIFO waiting
+ * to be transmitted ... software needs to poll TXS in the SPI_STAT
+ * register until it goes low for 2 successive reads
+ */
+ if (drv_data->tx != NULL) {
+ while ((bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS) ||
+ (bfin_read(&drv_data->regs->stat) & BIT_STAT_TXS))
+ cpu_relax();
+ }
+
+ dev_dbg(&drv_data->pdev->dev,
+ "in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
+ dmastat, bfin_read(&drv_data->regs->stat));
+
+ timeout = jiffies + HZ;
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF))
+ if (!time_before(jiffies, timeout)) {
+ dev_warn(&drv_data->pdev->dev, "timeout waiting for SPIF\n");
+ break;
+ } else
+ cpu_relax();
+
+ if ((dmastat & DMA_ERR) && (spistat & BIT_STAT_RBSY)) {
+ msg->state = ERROR_STATE;
+ dev_err(&drv_data->pdev->dev, "dma receive: fifo/buffer overflow\n");
+ } else {
+ msg->actual_length += drv_data->len_in_bytes;
+
+ if (drv_data->cs_change)
+ bfin_spi_cs_deactive(drv_data, chip);
+
+ /* Move to next transfer */
+ msg->state = bfin_spi_next_transfer(drv_data);
+ }
+
+ /* Schedule transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+
+ /* free the irq handler before next transfer */
+ dev_dbg(&drv_data->pdev->dev,
+ "disable dma channel irq%d\n",
+ drv_data->dma_channel);
+ dma_disable_irq_nosync(drv_data->dma_channel);
+
+ return IRQ_HANDLED;
+}
+
+static void bfin_spi_pump_transfers(unsigned long data)
+{
+ struct bfin_spi_master_data *drv_data = (struct bfin_spi_master_data *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct bfin_spi_slave_data *chip = NULL;
+ unsigned int bits_per_word;
+ u16 cr, cr_width = 0, dma_width, dma_config;
+ u32 tranf_success = 1;
+ u8 full_duplex = 0;
+
+ /* Get current state information */
+ message = drv_data->cur_msg;
+ transfer = drv_data->cur_transfer;
+ chip = drv_data->cur_chip;
+
+ /*
+ * if msg is error or done, report it back using complete() callback
+ */
+
+ /* Handle for abort */
+ if (message->state == ERROR_STATE) {
+ dev_dbg(&drv_data->pdev->dev, "transfer: we've hit an error\n");
+ message->status = -EIO;
+ bfin_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Handle end of message */
+ if (message->state == DONE_STATE) {
+ dev_dbg(&drv_data->pdev->dev, "transfer: all done!\n");
+ message->status = 0;
+ bfin_spi_flush(drv_data);
+ bfin_spi_giveback(drv_data);
+ return;
+ }
+
+ /* Delay if requested at end of transfer */
+ if (message->state == RUNNING_STATE) {
+ dev_dbg(&drv_data->pdev->dev, "transfer: still running ...\n");
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer, transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ }
+
+ /* Flush any existing transfers that may be sitting in the hardware */
+ if (bfin_spi_flush(drv_data) == 0) {
+ dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
+ message->status = -EIO;
+ bfin_spi_giveback(drv_data);
+ return;
+ }
+
+ if (transfer->len == 0) {
+ /* Move to next transfer of this msg */
+ message->state = bfin_spi_next_transfer(drv_data);
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+ return;
+ }
+
+ if (transfer->tx_buf != NULL) {
+ drv_data->tx = (void *)transfer->tx_buf;
+ drv_data->tx_end = drv_data->tx + transfer->len;
+ dev_dbg(&drv_data->pdev->dev, "tx_buf is %p, tx_end is %p\n",
+ transfer->tx_buf, drv_data->tx_end);
+ } else {
+ drv_data->tx = NULL;
+ }
+
+ if (transfer->rx_buf != NULL) {
+ full_duplex = transfer->tx_buf != NULL;
+ drv_data->rx = transfer->rx_buf;
+ drv_data->rx_end = drv_data->rx + transfer->len;
+ dev_dbg(&drv_data->pdev->dev, "rx_buf is %p, rx_end is %p\n",
+ transfer->rx_buf, drv_data->rx_end);
+ } else {
+ drv_data->rx = NULL;
+ }
+
+ drv_data->rx_dma = transfer->rx_dma;
+ drv_data->tx_dma = transfer->tx_dma;
+ drv_data->len_in_bytes = transfer->len;
+ drv_data->cs_change = transfer->cs_change;
+
+ /* Bits per word setup */
+ bits_per_word = transfer->bits_per_word;
+ if (bits_per_word == 16) {
+ drv_data->n_bytes = bits_per_word/8;
+ drv_data->len = (transfer->len) >> 1;
+ cr_width = BIT_CTL_WORDSIZE;
+ drv_data->ops = &bfin_bfin_spi_transfer_ops_u16;
+ } else if (bits_per_word == 8) {
+ drv_data->n_bytes = bits_per_word/8;
+ drv_data->len = transfer->len;
+ drv_data->ops = &bfin_bfin_spi_transfer_ops_u8;
+ }
+ cr = bfin_read(&drv_data->regs->ctl) & ~(BIT_CTL_TIMOD | BIT_CTL_WORDSIZE);
+ cr |= cr_width;
+ bfin_write(&drv_data->regs->ctl, cr);
+
+ dev_dbg(&drv_data->pdev->dev,
+ "transfer: drv_data->ops is %p, chip->ops is %p, u8_ops is %p\n",
+ drv_data->ops, chip->ops, &bfin_bfin_spi_transfer_ops_u8);
+
+ message->state = RUNNING_STATE;
+ dma_config = 0;
+
+ /* Speed setup (surely valid because already checked) */
+ if (transfer->speed_hz)
+ bfin_write(&drv_data->regs->baud, hz_to_spi_baud(transfer->speed_hz));
+ else
+ bfin_write(&drv_data->regs->baud, chip->baud);
+
+ bfin_write(&drv_data->regs->stat, BIT_STAT_CLR);
+ bfin_spi_cs_active(drv_data, chip);
+
+ dev_dbg(&drv_data->pdev->dev,
+ "now pumping a transfer: width is %d, len is %d\n",
+ cr_width, transfer->len);
+
+	/*
+	 * Try to map the DMA buffer and do a DMA transfer. If that is
+	 * possible, use the DMA path to r/w according to the enable_dma
+	 * setting, but only if we are not doing a full-duplex transfer
+	 * (the hardware does not support full-duplex DMA transfers).
+	 */
+ if (!full_duplex && drv_data->cur_chip->enable_dma
+ && drv_data->len > 6) {
+
+ unsigned long dma_start_addr, flags;
+
+ disable_dma(drv_data->dma_channel);
+ clear_dma_irqstat(drv_data->dma_channel);
+
+ /* config dma channel */
+ dev_dbg(&drv_data->pdev->dev, "doing dma transfer\n");
+ set_dma_x_count(drv_data->dma_channel, drv_data->len);
+ if (cr_width == BIT_CTL_WORDSIZE) {
+ set_dma_x_modify(drv_data->dma_channel, 2);
+ dma_width = WDSIZE_16;
+ } else {
+ set_dma_x_modify(drv_data->dma_channel, 1);
+ dma_width = WDSIZE_8;
+ }
+
+ /* poll for SPI completion before start */
+ while (!(bfin_read(&drv_data->regs->stat) & BIT_STAT_SPIF))
+ cpu_relax();
+
+ /* dirty hack for autobuffer DMA mode */
+ if (drv_data->tx_dma == 0xFFFF) {
+ dev_dbg(&drv_data->pdev->dev,
+ "doing autobuffer DMA out.\n");
+
+ /* no irq in autobuffer mode */
+ dma_config =
+ (DMAFLOW_AUTO | RESTART | dma_width | DI_EN);
+ set_dma_config(drv_data->dma_channel, dma_config);
+ set_dma_start_addr(drv_data->dma_channel,
+ (unsigned long)drv_data->tx);
+ enable_dma(drv_data->dma_channel);
+
+ /* start SPI transfer */
+ bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TIMOD_DMA_TX);
+
+ /* just return here, there can only be one transfer
+ * in this mode
+ */
+ message->status = 0;
+ bfin_spi_giveback(drv_data);
+ return;
+ }
+
+ /* In dma mode, rx or tx must be NULL in one transfer */
+ dma_config = (RESTART | dma_width | DI_EN);
+ if (drv_data->rx != NULL) {
+ /* set transfer mode, and enable SPI */
+ dev_dbg(&drv_data->pdev->dev, "doing DMA in to %p (size %zx)\n",
+ drv_data->rx, drv_data->len_in_bytes);
+
+ /* invalidate caches, if needed */
+ if (bfin_addr_dcacheable((unsigned long) drv_data->rx))
+ invalidate_dcache_range((unsigned long) drv_data->rx,
+ (unsigned long) (drv_data->rx +
+ drv_data->len_in_bytes));
+
+ dma_config |= WNR;
+ dma_start_addr = (unsigned long)drv_data->rx;
+ cr |= BIT_CTL_TIMOD_DMA_RX | BIT_CTL_SENDOPT;
+
+ } else if (drv_data->tx != NULL) {
+ dev_dbg(&drv_data->pdev->dev, "doing DMA out.\n");
+
+ /* flush caches, if needed */
+ if (bfin_addr_dcacheable((unsigned long) drv_data->tx))
+ flush_dcache_range((unsigned long) drv_data->tx,
+ (unsigned long) (drv_data->tx +
+ drv_data->len_in_bytes));
+
+ dma_start_addr = (unsigned long)drv_data->tx;
+ cr |= BIT_CTL_TIMOD_DMA_TX;
+
+ } else
+ BUG();
+
+		/* oh man, here there be monsters ... and I don't mean the
+		 * fluffy cute ones from Pixar, I mean the kind that'll eat
+		 * your data, kick your dog, and love it all. do *not* try
+		 * and change these lines unless you (1) heavily test DMA
+		 * with SPI flashes on a loaded system (e.g. ping floods),
+		 * (2) know just how broken the DMA engine interaction with
+		 * the SPI peripheral is, and (3) have someone else to blame
+		 * when you screw it all up anyway.
+		 */
+ set_dma_start_addr(drv_data->dma_channel, dma_start_addr);
+ set_dma_config(drv_data->dma_channel, dma_config);
+ local_irq_save(flags);
+ SSYNC();
+ bfin_write(&drv_data->regs->ctl, cr);
+ enable_dma(drv_data->dma_channel);
+ dma_enable_irq(drv_data->dma_channel);
+ local_irq_restore(flags);
+
+ return;
+ }
+
+ /*
+ * We always use SPI_WRITE mode (transfer starts with TDBR write).
+ * SPI_READ mode (transfer starts with RDBR read) seems to have
+ * problems with setting up the output value in TDBR prior to the
+ * start of the transfer.
+ */
+ bfin_write(&drv_data->regs->ctl, cr | BIT_CTL_TXMOD);
+
+ if (chip->pio_interrupt) {
+ /* SPI irq should have been disabled by now */
+
+ /* discard old RX data and clear RXS */
+ bfin_spi_dummy_read(drv_data);
+
+ /* start transfer */
+ if (drv_data->tx == NULL)
+ bfin_write(&drv_data->regs->tdbr, chip->idle_tx_val);
+ else {
+ int loop;
+ if (bits_per_word == 16) {
+ u16 *buf = (u16 *)drv_data->tx;
+ for (loop = 0; loop < bits_per_word / 16;
+ loop++) {
+ bfin_write(&drv_data->regs->tdbr, *buf++);
+ }
+ } else if (bits_per_word == 8) {
+ u8 *buf = (u8 *)drv_data->tx;
+ for (loop = 0; loop < bits_per_word / 8; loop++)
+ bfin_write(&drv_data->regs->tdbr, *buf++);
+ }
+
+ drv_data->tx += drv_data->n_bytes;
+ }
+
+ /* once TDBR is empty, interrupt is triggered */
+ enable_irq(drv_data->spi_irq);
+ return;
+ }
+
+ /* IO mode */
+ dev_dbg(&drv_data->pdev->dev, "doing IO transfer\n");
+
+ if (full_duplex) {
+ /* full duplex mode */
+ BUG_ON((drv_data->tx_end - drv_data->tx) !=
+ (drv_data->rx_end - drv_data->rx));
+ dev_dbg(&drv_data->pdev->dev,
+ "IO duplex: cr is 0x%x\n", cr);
+
+ drv_data->ops->duplex(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+			transfer_success = 0;
+ } else if (drv_data->tx != NULL) {
+ /* write only half duplex */
+ dev_dbg(&drv_data->pdev->dev,
+ "IO write: cr is 0x%x\n", cr);
+
+ drv_data->ops->write(drv_data);
+
+ if (drv_data->tx != drv_data->tx_end)
+			transfer_success = 0;
+ } else if (drv_data->rx != NULL) {
+ /* read only half duplex */
+ dev_dbg(&drv_data->pdev->dev,
+ "IO read: cr is 0x%x\n", cr);
+
+ drv_data->ops->read(drv_data);
+ if (drv_data->rx != drv_data->rx_end)
+			transfer_success = 0;
+ }
+
+	if (!transfer_success) {
+ dev_dbg(&drv_data->pdev->dev,
+ "IO write error!\n");
+ message->state = ERROR_STATE;
+ } else {
+ /* Update total byte transferred */
+ message->actual_length += drv_data->len_in_bytes;
+ /* Move to next transfer of this msg */
+ message->state = bfin_spi_next_transfer(drv_data);
+ if (drv_data->cs_change && message->state != DONE_STATE) {
+ bfin_spi_flush(drv_data);
+ bfin_spi_cs_deactive(drv_data, chip);
+ }
+ }
+
+ /* Schedule next transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+/* pop a msg from queue and kick off real transfer */
+static void bfin_spi_pump_messages(struct work_struct *work)
+{
+ struct bfin_spi_master_data *drv_data;
+ unsigned long flags;
+
+ drv_data = container_of(work, struct bfin_spi_master_data, pump_messages);
+
+ /* Lock queue and check for queue work */
+ spin_lock_irqsave(&drv_data->lock, flags);
+ if (list_empty(&drv_data->queue) || !drv_data->running) {
+ /* pumper kicked off but no work to do */
+ drv_data->busy = 0;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Make sure we are not already running a message */
+ if (drv_data->cur_msg) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return;
+ }
+
+ /* Extract head of queue */
+ drv_data->cur_msg = list_entry(drv_data->queue.next,
+ struct spi_message, queue);
+
+	/* Set up the SPI using the per-chip configuration */
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+ bfin_spi_restore_state(drv_data);
+
+ list_del_init(&drv_data->cur_msg->queue);
+
+ /* Initial message state */
+ drv_data->cur_msg->state = START_STATE;
+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+
+ dev_dbg(&drv_data->pdev->dev,
+ "got a message to pump, state is set to: baud "
+ "%d, flag 0x%x, ctl 0x%x\n",
+ drv_data->cur_chip->baud, drv_data->cur_chip->flag,
+ drv_data->cur_chip->ctl_reg);
+
+ dev_dbg(&drv_data->pdev->dev,
+ "the first transfer len is %d\n",
+ drv_data->cur_transfer->len);
+
+ /* Mark as busy and launch transfers */
+ tasklet_schedule(&drv_data->pump_transfers);
+
+ drv_data->busy = 1;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+}
+
+/*
+ * got a msg to transfer, queue it in drv_data->queue.
+ * And kick off message pumper
+ */
+static int bfin_spi_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (!drv_data->running) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -ESHUTDOWN;
+ }
+
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+ msg->state = START_STATE;
+
+	dev_dbg(&spi->dev, "adding a msg in transfer()\n");
+ list_add_tail(&msg->queue, &drv_data->queue);
+
+ if (drv_data->running && !drv_data->busy)
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return 0;
+}
+
+#define MAX_SPI_SSEL 7
+
+static const u16 ssel[][MAX_SPI_SSEL] = {
+ {P_SPI0_SSEL1, P_SPI0_SSEL2, P_SPI0_SSEL3,
+ P_SPI0_SSEL4, P_SPI0_SSEL5,
+ P_SPI0_SSEL6, P_SPI0_SSEL7},
+
+ {P_SPI1_SSEL1, P_SPI1_SSEL2, P_SPI1_SSEL3,
+ P_SPI1_SSEL4, P_SPI1_SSEL5,
+ P_SPI1_SSEL6, P_SPI1_SSEL7},
+
+ {P_SPI2_SSEL1, P_SPI2_SSEL2, P_SPI2_SSEL3,
+ P_SPI2_SSEL4, P_SPI2_SSEL5,
+ P_SPI2_SSEL6, P_SPI2_SSEL7},
+};
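+/* e.g. bus 0, chip select 3 maps to ssel[0][3 - 1] == P_SPI0_SSEL3 */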
+
+/* setup for devices (may be called multiple times -- not just first setup) */
+static int bfin_spi_setup(struct spi_device *spi)
+{
+ struct bfin5xx_spi_chip *chip_info;
+ struct bfin_spi_slave_data *chip = NULL;
+ struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
+ u16 bfin_ctl_reg;
+ int ret = -EINVAL;
+
+ /* Only alloc (or use chip_info) on first setup */
+ chip_info = NULL;
+ chip = spi_get_ctldata(spi);
+ if (chip == NULL) {
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip) {
+ dev_err(&spi->dev, "cannot allocate chip data\n");
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ chip->enable_dma = 0;
+ chip_info = spi->controller_data;
+ }
+
+ /* Let people set non-standard bits directly */
+ bfin_ctl_reg = BIT_CTL_OPENDRAIN | BIT_CTL_EMISO |
+ BIT_CTL_PSSE | BIT_CTL_GM | BIT_CTL_SZ;
+
+ /* chip_info isn't always needed */
+ if (chip_info) {
+		/* Make sure people stop trying to set fields via ctl_reg
+		 * when they should actually be using the common SPI framework.
+		 * Currently we let through: WOM EMISO PSSE GM SZ.
+		 * Not sure if a user actually needs/uses any of these,
+		 * but let's assume (for now) they do.
+		 */
+ if (chip_info->ctl_reg & ~bfin_ctl_reg) {
+ dev_err(&spi->dev,
+ "do not set bits in ctl_reg that the SPI framework manages\n");
+ goto error;
+ }
+ chip->enable_dma = chip_info->enable_dma != 0
+ && drv_data->master_info->enable_dma;
+ chip->ctl_reg = chip_info->ctl_reg;
+ chip->cs_chg_udelay = chip_info->cs_chg_udelay;
+ chip->idle_tx_val = chip_info->idle_tx_val;
+ chip->pio_interrupt = chip_info->pio_interrupt;
+ } else {
+ /* force a default base state */
+ chip->ctl_reg &= bfin_ctl_reg;
+ }
+
+ /* translate common spi framework into our register */
+ if (spi->mode & SPI_CPOL)
+ chip->ctl_reg |= BIT_CTL_CPOL;
+ if (spi->mode & SPI_CPHA)
+ chip->ctl_reg |= BIT_CTL_CPHA;
+ if (spi->mode & SPI_LSB_FIRST)
+ chip->ctl_reg |= BIT_CTL_LSBF;
+	/* we don't support running in slave mode (yet?) */
+ chip->ctl_reg |= BIT_CTL_MASTER;
+
+	/*
+	 * Note: on Blackfin, chip->baud holds the value written to the
+	 * SPI_BAUD register, not the real baud rate in Hz (see the worked
+	 * example below).
+	 */
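+	/*
+	 * Worked example (assuming the usual BF5xx relation
+	 * f_sck = SCLK / (2 * SPI_BAUD)): with SCLK = 100 MHz and
+	 * spi->max_speed_hz = 5 MHz, hz_to_spi_baud() stores 10 here.
+	 */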
+ chip->baud = hz_to_spi_baud(spi->max_speed_hz);
+ chip->chip_select_num = spi->chip_select;
+ if (chip->chip_select_num < MAX_CTRL_CS) {
+ if (!(spi->mode & SPI_CPHA))
+ dev_warn(&spi->dev,
+ "Warning: SPI CPHA not set: Slave Select not under software control!\n"
+ "See Documentation/blackfin/bfin-spi-notes.txt\n");
+
+ chip->flag = (1 << spi->chip_select) << 8;
+ } else
+ chip->cs_gpio = chip->chip_select_num - MAX_CTRL_CS;
+
+ if (chip->enable_dma && chip->pio_interrupt) {
+ dev_err(&spi->dev,
+ "enable_dma is set, do not set pio_interrupt\n");
+ goto error;
+ }
+ /*
+ * if any one SPI chip is registered and wants DMA, request the
+ * DMA channel for it
+ */
+ if (chip->enable_dma && !drv_data->dma_requested) {
+ /* register dma irq handler */
+ ret = request_dma(drv_data->dma_channel, "BFIN_SPI_DMA");
+ if (ret) {
+ dev_err(&spi->dev,
+ "Unable to request BlackFin SPI DMA channel\n");
+ goto error;
+ }
+ drv_data->dma_requested = 1;
+
+ ret = set_dma_callback(drv_data->dma_channel,
+ bfin_spi_dma_irq_handler, drv_data);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to set dma callback\n");
+ goto error;
+ }
+ dma_disable_irq(drv_data->dma_channel);
+ }
+
+ if (chip->pio_interrupt && !drv_data->irq_requested) {
+ ret = request_irq(drv_data->spi_irq, bfin_spi_pio_irq_handler,
+ 0, "BFIN_SPI", drv_data);
+ if (ret) {
+ dev_err(&spi->dev, "Unable to register spi IRQ\n");
+ goto error;
+ }
+ drv_data->irq_requested = 1;
+ /* we use write mode, spi irq has to be disabled here */
+ disable_irq(drv_data->spi_irq);
+ }
+
+ if (chip->chip_select_num >= MAX_CTRL_CS) {
+ /* Only request on first setup */
+ if (spi_get_ctldata(spi) == NULL) {
+ ret = gpio_request(chip->cs_gpio, spi->modalias);
+ if (ret) {
+ dev_err(&spi->dev, "gpio_request() error\n");
+ goto pin_error;
+ }
+ gpio_direction_output(chip->cs_gpio, 1);
+ }
+ }
+
+ dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n",
+ spi->modalias, spi->bits_per_word, chip->enable_dma);
+ dev_dbg(&spi->dev, "ctl_reg is 0x%x, flag_reg is 0x%x\n",
+ chip->ctl_reg, chip->flag);
+
+ spi_set_ctldata(spi, chip);
+
+ dev_dbg(&spi->dev, "chip select number is %d\n", chip->chip_select_num);
+ if (chip->chip_select_num < MAX_CTRL_CS) {
+ ret = peripheral_request(ssel[spi->master->bus_num]
+ [chip->chip_select_num-1], spi->modalias);
+ if (ret) {
+ dev_err(&spi->dev, "peripheral_request() error\n");
+ goto pin_error;
+ }
+ }
+
+ bfin_spi_cs_enable(drv_data, chip);
+ bfin_spi_cs_deactive(drv_data, chip);
+
+ return 0;
+
+ pin_error:
+ if (chip->chip_select_num >= MAX_CTRL_CS)
+ gpio_free(chip->cs_gpio);
+ else
+ peripheral_free(ssel[spi->master->bus_num]
+ [chip->chip_select_num - 1]);
+ error:
+ if (chip) {
+ if (drv_data->dma_requested)
+ free_dma(drv_data->dma_channel);
+ drv_data->dma_requested = 0;
+
+ kfree(chip);
+		/* prevent freeing 'chip' twice */
+ spi_set_ctldata(spi, NULL);
+ }
+
+ return ret;
+}
+
+/*
+ * callback for spi framework.
+ * clean driver specific data
+ */
+static void bfin_spi_cleanup(struct spi_device *spi)
+{
+ struct bfin_spi_slave_data *chip = spi_get_ctldata(spi);
+ struct bfin_spi_master_data *drv_data = spi_master_get_devdata(spi->master);
+
+ if (!chip)
+ return;
+
+ if (chip->chip_select_num < MAX_CTRL_CS) {
+ peripheral_free(ssel[spi->master->bus_num]
+ [chip->chip_select_num-1]);
+ bfin_spi_cs_disable(drv_data, chip);
+ } else
+ gpio_free(chip->cs_gpio);
+
+ kfree(chip);
+	/* prevent freeing 'chip' twice */
+ spi_set_ctldata(spi, NULL);
+}
+
+static int bfin_spi_init_queue(struct bfin_spi_master_data *drv_data)
+{
+ INIT_LIST_HEAD(&drv_data->queue);
+ spin_lock_init(&drv_data->lock);
+
+ drv_data->running = false;
+ drv_data->busy = 0;
+
+ /* init transfer tasklet */
+ tasklet_init(&drv_data->pump_transfers,
+ bfin_spi_pump_transfers, (unsigned long)drv_data);
+
+ /* init messages workqueue */
+ INIT_WORK(&drv_data->pump_messages, bfin_spi_pump_messages);
+ drv_data->workqueue = create_singlethread_workqueue(
+ dev_name(drv_data->master->dev.parent));
+ if (drv_data->workqueue == NULL)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int bfin_spi_start_queue(struct bfin_spi_master_data *drv_data)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+ if (drv_data->running || drv_data->busy) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ return -EBUSY;
+ }
+
+ drv_data->running = true;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+ drv_data->cur_chip = NULL;
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ queue_work(drv_data->workqueue, &drv_data->pump_messages);
+
+ return 0;
+}
+
+static int bfin_spi_stop_queue(struct bfin_spi_master_data *drv_data)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int status = 0;
+
+ spin_lock_irqsave(&drv_data->lock, flags);
+
+	/*
+	 * This is a bit lame, but is optimized for the common execution path.
+	 * A wait_queue on drv_data->busy could be used, but then the common
+	 * execution path (pump_messages) would be required to call wake_up or
+	 * friends on every SPI message. Do this instead.
+	 */
+ drv_data->running = false;
+ while ((!list_empty(&drv_data->queue) || drv_data->busy) && limit--) {
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+ msleep(10);
+ spin_lock_irqsave(&drv_data->lock, flags);
+ }
+
+ if (!list_empty(&drv_data->queue) || drv_data->busy)
+ status = -EBUSY;
+
+ spin_unlock_irqrestore(&drv_data->lock, flags);
+
+ return status;
+}
+
+static int bfin_spi_destroy_queue(struct bfin_spi_master_data *drv_data)
+{
+ int status;
+
+ status = bfin_spi_stop_queue(drv_data);
+ if (status != 0)
+ return status;
+
+ destroy_workqueue(drv_data->workqueue);
+
+ return 0;
+}
+
+static int bfin_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct bfin5xx_spi_master *platform_info;
+ struct spi_master *master;
+ struct bfin_spi_master_data *drv_data;
+ struct resource *res;
+ int status = 0;
+
+ platform_info = dev_get_platdata(dev);
+
+ /* Allocate master with space for drv_data */
+ master = spi_alloc_master(dev, sizeof(*drv_data));
+ if (!master) {
+		dev_err(&pdev->dev, "cannot alloc spi_master\n");
+ return -ENOMEM;
+ }
+
+ drv_data = spi_master_get_devdata(master);
+ drv_data->master = master;
+ drv_data->master_info = platform_info;
+ drv_data->pdev = pdev;
+ drv_data->pin_req = platform_info->pin_req;
+
+ /* the spi->mode bits supported by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->bus_num = pdev->id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->cleanup = bfin_spi_cleanup;
+ master->setup = bfin_spi_setup;
+ master->transfer = bfin_spi_transfer;
+
+ /* Find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (res == NULL) {
+ dev_err(dev, "Cannot get IORESOURCE_MEM\n");
+ status = -ENOENT;
+ goto out_error_get_res;
+ }
+
+ drv_data->regs = ioremap(res->start, resource_size(res));
+ if (drv_data->regs == NULL) {
+ dev_err(dev, "Cannot map IO\n");
+ status = -ENXIO;
+ goto out_error_ioremap;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (res == NULL) {
+ dev_err(dev, "No DMA channel specified\n");
+ status = -ENOENT;
+ goto out_error_free_io;
+ }
+ drv_data->dma_channel = res->start;
+
+ drv_data->spi_irq = platform_get_irq(pdev, 0);
+ if (drv_data->spi_irq < 0) {
+ dev_err(dev, "No spi pio irq specified\n");
+ status = -ENOENT;
+ goto out_error_free_io;
+ }
+
+	/* Initialize and start the queue */
+ status = bfin_spi_init_queue(drv_data);
+ if (status != 0) {
+ dev_err(dev, "problem initializing queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = bfin_spi_start_queue(drv_data);
+ if (status != 0) {
+ dev_err(dev, "problem starting queue\n");
+ goto out_error_queue_alloc;
+ }
+
+ status = peripheral_request_list(drv_data->pin_req, DRV_NAME);
+ if (status != 0) {
+		dev_err(&pdev->dev, "requesting peripherals failed\n");
+ goto out_error_queue_alloc;
+ }
+
+ /* Reset SPI registers. If these registers were used by the boot loader,
+ * the sky may fall on your head if you enable the dma controller.
+ */
+ bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER);
+ bfin_write(&drv_data->regs->flg, 0xFF00);
+
+ /* Register with the SPI framework */
+ platform_set_drvdata(pdev, drv_data);
+ status = spi_register_master(master);
+ if (status != 0) {
+ dev_err(dev, "problem registering spi master\n");
+ goto out_error_queue_alloc;
+ }
+
+ dev_info(dev, "%s, Version %s, regs@%p, dma channel@%d\n",
+ DRV_DESC, DRV_VERSION, drv_data->regs,
+ drv_data->dma_channel);
+ return status;
+
+out_error_queue_alloc:
+ bfin_spi_destroy_queue(drv_data);
+out_error_free_io:
+ iounmap(drv_data->regs);
+out_error_ioremap:
+out_error_get_res:
+ spi_master_put(master);
+
+ return status;
+}
+
+/* stop hardware and remove the driver */
+static int bfin_spi_remove(struct platform_device *pdev)
+{
+ struct bfin_spi_master_data *drv_data = platform_get_drvdata(pdev);
+ int status = 0;
+
+ if (!drv_data)
+ return 0;
+
+ /* Remove the queue */
+ status = bfin_spi_destroy_queue(drv_data);
+ if (status != 0)
+ return status;
+
+	/* Disable the SPI controller at the peripheral and SoC level */
+ bfin_spi_disable(drv_data);
+
+ /* Release DMA */
+ if (drv_data->master_info->enable_dma) {
+ if (dma_channel_active(drv_data->dma_channel))
+ free_dma(drv_data->dma_channel);
+ }
+
+ if (drv_data->irq_requested) {
+ free_irq(drv_data->spi_irq, drv_data);
+ drv_data->irq_requested = 0;
+ }
+
+ /* Disconnect from the SPI framework */
+ spi_unregister_master(drv_data->master);
+
+ peripheral_free_list(drv_data->pin_req);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int bfin_spi_suspend(struct device *dev)
+{
+ struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev);
+ int status = 0;
+
+ status = bfin_spi_stop_queue(drv_data);
+ if (status != 0)
+ return status;
+
+ drv_data->ctrl_reg = bfin_read(&drv_data->regs->ctl);
+ drv_data->flag_reg = bfin_read(&drv_data->regs->flg);
+
+ /*
+ * reset SPI_CTL and SPI_FLG registers
+ */
+ bfin_write(&drv_data->regs->ctl, BIT_CTL_CPHA | BIT_CTL_MASTER);
+ bfin_write(&drv_data->regs->flg, 0xFF00);
+
+ return 0;
+}
+
+static int bfin_spi_resume(struct device *dev)
+{
+ struct bfin_spi_master_data *drv_data = dev_get_drvdata(dev);
+ int status = 0;
+
+ bfin_write(&drv_data->regs->ctl, drv_data->ctrl_reg);
+ bfin_write(&drv_data->regs->flg, drv_data->flag_reg);
+
+ /* Start the queue running */
+ status = bfin_spi_start_queue(drv_data);
+ if (status != 0) {
+ dev_err(dev, "problem starting queue (%d)\n", status);
+ return status;
+ }
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(bfin_spi_pm_ops, bfin_spi_suspend, bfin_spi_resume);
+
+#define BFIN_SPI_PM_OPS (&bfin_spi_pm_ops)
+#else
+#define BFIN_SPI_PM_OPS NULL
+#endif
+
+MODULE_ALIAS("platform:bfin-spi");
+static struct platform_driver bfin_spi_driver = {
+ .driver = {
+ .name = DRV_NAME,
+ .pm = BFIN_SPI_PM_OPS,
+ },
+ .probe = bfin_spi_probe,
+ .remove = bfin_spi_remove,
+};
+
+static int __init bfin_spi_init(void)
+{
+ return platform_driver_register(&bfin_spi_driver);
+}
+subsys_initcall(bfin_spi_init);
+
+static void __exit bfin_spi_exit(void)
+{
+ platform_driver_unregister(&bfin_spi_driver);
+}
+module_exit(bfin_spi_exit);
diff --git a/drivers/spi/spi-bitbang-txrx.h b/drivers/spi/spi-bitbang-txrx.h
new file mode 100644
index 000000000..06b34e5bc
--- /dev/null
+++ b/drivers/spi/spi-bitbang-txrx.h
@@ -0,0 +1,107 @@
+/*
+ * Mix this utility code with some glue code to get one of several types of
+ * simple SPI master driver. Two do polled word-at-a-time I/O:
+ *
+ * - GPIO/parport bitbangers. Provide chipselect() and txrx_word[](),
+ * expanding the per-word routines from the inline templates below.
+ *
+ * - Drivers for controllers resembling bare shift registers. Provide
+ * chipselect() and txrx_word[](), with custom setup()/cleanup() methods
+ * that use your controller's clock and chipselect registers.
+ *
+ * Some hardware works well with requests at spi_transfer scope:
+ *
+ * - Drivers leveraging smarter hardware, with fifos or DMA; or for half
+ * duplex (MicroWire) controllers. Provide chipselect() and txrx_bufs(),
+ * and custom setup()/cleanup() methods.
+ */
+
+/*
+ * The code that knows what GPIO pins do what should have declared four
+ * functions, ideally as inlines, before including this header:
+ *
+ * void setsck(struct spi_device *, int is_on);
+ * void setmosi(struct spi_device *, int is_on);
+ * int getmiso(struct spi_device *);
+ * void spidelay(unsigned);
+ *
+ * setsck()'s is_on parameter is a zero/nonzero boolean.
+ *
+ * setmosi()'s is_on parameter is a zero/nonzero boolean.
+ *
+ * getmiso() is required to return 0 or 1 only. Any other value is invalid
+ * and will result in improper operation.
+ *
+ * A non-inlined routine would call bitbang_txrx_*() routines. The
+ * main loop could easily compile down to a handful of instructions,
+ * especially if the delay is a NOP (to run at peak speed).
+ *
+ * Since this is software, the timings may not be exactly what your board's
+ * chips need ... there may be several reasons you'd need to tweak timings
+ * in these routines, not just to make it faster or slower to match a
+ * particular CPU clock rate.
+ */
+
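+/*
+ * For example, a GPIO-based glue layer might provide (an illustrative
+ * sketch only -- SCK_GPIO/MOSI_GPIO/MISO_GPIO are hypothetical; see
+ * spi-gpio.c and spi-butterfly.c for real glue code):
+ *
+ *	static inline void setsck(struct spi_device *spi, int is_on)
+ *	{
+ *		gpio_set_value(SCK_GPIO, is_on);
+ *	}
+ *
+ *	static inline void setmosi(struct spi_device *spi, int is_on)
+ *	{
+ *		gpio_set_value(MOSI_GPIO, is_on);
+ *	}
+ *
+ *	static inline int getmiso(struct spi_device *spi)
+ *	{
+ *		return !!gpio_get_value(MISO_GPIO);
+ *	}
+ *
+ *	#define spidelay(ns)	ndelay(ns)
+ *
+ *	#include "spi-bitbang-txrx.h"
+ */
+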
+static inline u32
+bitbang_txrx_be_cpha0(struct spi_device *spi,
+ unsigned nsecs, unsigned cpol, unsigned flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_0; else this is SPI_MODE_2 */
+
+	u32 oldbit = (!(word & (1 << (bits - 1)))) << 31;
+ /* clock starts at inactive polarity */
+ for (word <<= (32 - bits); likely(bits); bits--) {
+
+ /* setup MSB (to slave) on trailing edge */
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & (1 << 31)) != oldbit) {
+ setmosi(spi, word & (1 << 31));
+ oldbit = word & (1 << 31);
+ }
+ }
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, !cpol);
+ spidelay(nsecs);
+
+ /* sample MSB (from slave) on leading edge */
+ word <<= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi);
+ setsck(spi, cpol);
+ }
+ return word;
+}
+
+static inline u32
+bitbang_txrx_be_cpha1(struct spi_device *spi,
+ unsigned nsecs, unsigned cpol, unsigned flags,
+ u32 word, u8 bits)
+{
+ /* if (cpol == 0) this is SPI_MODE_1; else this is SPI_MODE_3 */
+
+	u32 oldbit = (!(word & (1 << (bits - 1)))) << 31;
+ /* clock starts at inactive polarity */
+ for (word <<= (32 - bits); likely(bits); bits--) {
+
+ /* setup MSB (to slave) on leading edge */
+ setsck(spi, !cpol);
+ if ((flags & SPI_MASTER_NO_TX) == 0) {
+ if ((word & (1 << 31)) != oldbit) {
+ setmosi(spi, word & (1 << 31));
+ oldbit = word & (1 << 31);
+ }
+ }
+ spidelay(nsecs); /* T(setup) */
+
+ setsck(spi, cpol);
+ spidelay(nsecs);
+
+ /* sample MSB (from slave) on trailing edge */
+ word <<= 1;
+ if ((flags & SPI_MASTER_NO_RX) == 0)
+ word |= getmiso(spi);
+ }
+ return word;
+}
diff --git a/drivers/spi/spi-bitbang.c b/drivers/spi/spi-bitbang.c
new file mode 100644
index 000000000..840a4984d
--- /dev/null
+++ b/drivers/spi/spi-bitbang.c
@@ -0,0 +1,475 @@
+/*
+ * polling/bitbanging SPI master controller driver utilities
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * FIRST PART (OPTIONAL): word-at-a-time spi_transfer support.
+ * Use this for GPIO or shift-register level hardware APIs.
+ *
+ * spi_bitbang_cs is in spi_device->controller_state, which is unavailable
+ * to glue code. These bitbang setup() and cleanup() routines are always
+ * used, though maybe they're called from controller-aware code.
+ *
+ * chipselect() and friends may use spi_device->controller_data and
+ * controller registers as appropriate.
+ *
+ *
+ * NOTE: SPI controller pins can often be used as GPIO pins instead,
+ * which means you could use a bitbang driver either to get hardware
+ * working quickly, or testing for differences that aren't speed related.
+ */
+
+struct spi_bitbang_cs {
+ unsigned nsecs; /* (clock cycle time)/2 */
+ u32 (*txrx_word)(struct spi_device *spi, unsigned nsecs,
+ u32 word, u8 bits);
+ unsigned (*txrx_bufs)(struct spi_device *,
+ u32 (*txrx_word)(
+ struct spi_device *spi,
+ unsigned nsecs,
+ u32 word, u8 bits),
+ unsigned, struct spi_transfer *);
+};
+
+static unsigned bitbang_txrx_8(
+ struct spi_device *spi,
+ u32 (*txrx_word)(struct spi_device *spi,
+ unsigned nsecs,
+ u32 word, u8 bits),
+ unsigned ns,
+ struct spi_transfer *t
+) {
+ unsigned bits = t->bits_per_word;
+ unsigned count = t->len;
+ const u8 *tx = t->tx_buf;
+ u8 *rx = t->rx_buf;
+
+ while (likely(count > 0)) {
+ u8 word = 0;
+
+ if (tx)
+ word = *tx++;
+ word = txrx_word(spi, ns, word, bits);
+ if (rx)
+ *rx++ = word;
+ count -= 1;
+ }
+ return t->len - count;
+}
+
+static unsigned bitbang_txrx_16(
+ struct spi_device *spi,
+ u32 (*txrx_word)(struct spi_device *spi,
+ unsigned nsecs,
+ u32 word, u8 bits),
+ unsigned ns,
+ struct spi_transfer *t
+) {
+ unsigned bits = t->bits_per_word;
+ unsigned count = t->len;
+ const u16 *tx = t->tx_buf;
+ u16 *rx = t->rx_buf;
+
+ while (likely(count > 1)) {
+ u16 word = 0;
+
+ if (tx)
+ word = *tx++;
+ word = txrx_word(spi, ns, word, bits);
+ if (rx)
+ *rx++ = word;
+ count -= 2;
+ }
+ return t->len - count;
+}
+
+static unsigned bitbang_txrx_32(
+ struct spi_device *spi,
+ u32 (*txrx_word)(struct spi_device *spi,
+ unsigned nsecs,
+ u32 word, u8 bits),
+ unsigned ns,
+ struct spi_transfer *t
+) {
+ unsigned bits = t->bits_per_word;
+ unsigned count = t->len;
+ const u32 *tx = t->tx_buf;
+ u32 *rx = t->rx_buf;
+
+ while (likely(count > 3)) {
+ u32 word = 0;
+
+ if (tx)
+ word = *tx++;
+ word = txrx_word(spi, ns, word, bits);
+ if (rx)
+ *rx++ = word;
+ count -= 4;
+ }
+ return t->len - count;
+}
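+
+/*
+ * Note the word-size granularity of the loops above: e.g. a 16-bit
+ * transfer with t->len == 5 shifts out two words (4 bytes) and returns 4,
+ * which the message pump below reports as a short transfer (-EREMOTEIO)
+ * because it differs from t->len.
+ */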
+
+int spi_bitbang_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct spi_bitbang_cs *cs = spi->controller_state;
+ u8 bits_per_word;
+ u32 hz;
+
+ if (t) {
+ bits_per_word = t->bits_per_word;
+ hz = t->speed_hz;
+ } else {
+ bits_per_word = 0;
+ hz = 0;
+ }
+
+ /* spi_transfer level calls that work per-word */
+ if (!bits_per_word)
+ bits_per_word = spi->bits_per_word;
+ if (bits_per_word <= 8)
+ cs->txrx_bufs = bitbang_txrx_8;
+ else if (bits_per_word <= 16)
+ cs->txrx_bufs = bitbang_txrx_16;
+ else if (bits_per_word <= 32)
+ cs->txrx_bufs = bitbang_txrx_32;
+ else
+ return -EINVAL;
+
+ /* nsecs = (clock period)/2 */
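+	/*
+	 * e.g. hz = 10 MHz gives a 100 ns clock period, so cs->nsecs = 50
+	 * and each half of the bitbanged clock cycle is held for ~50 ns.
+	 */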
+ if (!hz)
+ hz = spi->max_speed_hz;
+ if (hz) {
+ cs->nsecs = (1000000000/2) / hz;
+ if (cs->nsecs > (MAX_UDELAY_MS * 1000 * 1000))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_setup_transfer);
+
+/**
+ * spi_bitbang_setup - default setup for per-word I/O loops
+ */
+int spi_bitbang_setup(struct spi_device *spi)
+{
+ struct spi_bitbang_cs *cs = spi->controller_state;
+ struct spi_bitbang *bitbang;
+ unsigned long flags;
+
+ bitbang = spi_master_get_devdata(spi->master);
+
+ if (!cs) {
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi->controller_state = cs;
+ }
+
+ /* per-word shift register access, in hardware or bitbanging */
+ cs->txrx_word = bitbang->txrx_word[spi->mode & (SPI_CPOL|SPI_CPHA)];
+ if (!cs->txrx_word)
+ return -EINVAL;
+
+ if (bitbang->setup_transfer) {
+ int retval = bitbang->setup_transfer(spi, NULL);
+ if (retval < 0)
+ return retval;
+ }
+
+ dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
+
+ /* NOTE we _need_ to call chipselect() early, ideally with adapter
+ * setup, unless the hardware defaults cooperate to avoid confusion
+ * between normal (active low) and inverted chipselects.
+ */
+
+ /* deselect chip (low or high) */
+ spin_lock_irqsave(&bitbang->lock, flags);
+ if (!bitbang->busy) {
+ bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+ ndelay(cs->nsecs);
+ }
+ spin_unlock_irqrestore(&bitbang->lock, flags);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_setup);
+
+/**
+ * spi_bitbang_cleanup - default cleanup for per-word I/O loops
+ */
+void spi_bitbang_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_cleanup);
+
+static int spi_bitbang_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct spi_bitbang_cs *cs = spi->controller_state;
+ unsigned nsecs = cs->nsecs;
+
+ return cs->txrx_bufs(spi, cs->txrx_word, nsecs, t);
+}
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * SECOND PART ... simple transfer queue runner.
+ *
+ * This costs a task context per controller, running the queue by
+ * performing each transfer in sequence. Smarter hardware can queue
+ * several DMA transfers at once, and process several controller queues
+ * in parallel; this driver doesn't match such hardware very well.
+ *
+ * Drivers can provide word-at-a-time i/o primitives, or provide
+ * transfer-at-a-time ones to leverage dma or fifo hardware.
+ */
+
+static int spi_bitbang_prepare_hardware(struct spi_master *spi)
+{
+ struct spi_bitbang *bitbang;
+ unsigned long flags;
+
+ bitbang = spi_master_get_devdata(spi);
+
+ spin_lock_irqsave(&bitbang->lock, flags);
+ bitbang->busy = 1;
+ spin_unlock_irqrestore(&bitbang->lock, flags);
+
+ return 0;
+}
+
+static int spi_bitbang_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct spi_bitbang *bitbang;
+ unsigned nsecs;
+ struct spi_transfer *t = NULL;
+ unsigned cs_change;
+ int status;
+ int do_setup = -1;
+ struct spi_device *spi = m->spi;
+
+ bitbang = spi_master_get_devdata(master);
+
+ /* FIXME this is made-up ... the correct value is known to
+ * word-at-a-time bitbang code, and presumably chipselect()
+ * should enforce these requirements too?
+ */
+ nsecs = 100;
+
+ cs_change = 1;
+ status = 0;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+
+ /* override speed or wordsize? */
+ if (t->speed_hz || t->bits_per_word)
+ do_setup = 1;
+
+ /* init (-1) or override (1) transfer params */
+ if (do_setup != 0) {
+ if (bitbang->setup_transfer) {
+ status = bitbang->setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ }
+ if (do_setup == -1)
+ do_setup = 0;
+ }
+
+ /* set up default clock polarity, and activate chip;
+ * this implicitly updates clock and spi modes as
+ * previously recorded for this device via setup().
+ * (and also deselects any other chip that might be
+ * selected ...)
+ */
+ if (cs_change) {
+ bitbang->chipselect(spi, BITBANG_CS_ACTIVE);
+ ndelay(nsecs);
+ }
+ cs_change = t->cs_change;
+ if (!t->tx_buf && !t->rx_buf && t->len) {
+ status = -EINVAL;
+ break;
+ }
+
+ /* transfer data. the lower level code handles any
+ * new dma mappings it needs. our caller always gave
+ * us dma-safe buffers.
+ */
+ if (t->len) {
+ /* REVISIT dma API still needs a designated
+ * DMA_ADDR_INVALID; ~0 might be better.
+ */
+ if (!m->is_dma_mapped)
+ t->rx_dma = t->tx_dma = 0;
+ status = bitbang->txrx_bufs(spi, t);
+ }
+ if (status > 0)
+ m->actual_length += status;
+ if (status != t->len) {
+ /* always report some kind of error */
+ if (status >= 0)
+ status = -EREMOTEIO;
+ break;
+ }
+ status = 0;
+
+ /* protocol tweaks before next transfer */
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change &&
+ !list_is_last(&t->transfer_list, &m->transfers)) {
+ /* sometimes a short mid-message deselect of the chip
+ * may be needed to terminate a mode or command
+ */
+ ndelay(nsecs);
+ bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+ ndelay(nsecs);
+ }
+ }
+
+ m->status = status;
+
+ /* normally deactivate chipselect ... unless no error and
+ * cs_change has hinted that the next message will probably
+ * be for this chip too.
+ */
+ if (!(status == 0 && cs_change)) {
+ ndelay(nsecs);
+ bitbang->chipselect(spi, BITBANG_CS_INACTIVE);
+ ndelay(nsecs);
+ }
+
+ spi_finalize_current_message(master);
+
+ return status;
+}
+
+static int spi_bitbang_unprepare_hardware(struct spi_master *spi)
+{
+ struct spi_bitbang *bitbang;
+ unsigned long flags;
+
+ bitbang = spi_master_get_devdata(spi);
+
+ spin_lock_irqsave(&bitbang->lock, flags);
+ bitbang->busy = 0;
+ spin_unlock_irqrestore(&bitbang->lock, flags);
+
+ return 0;
+}
+
+/*----------------------------------------------------------------------*/
+
+/**
+ * spi_bitbang_start - start up a polled/bitbanging SPI master driver
+ * @bitbang: driver handle
+ *
+ * Caller should have zero-initialized all parts of the structure, and then
+ * provided callbacks for chip selection and I/O loops. This routine installs
+ * its own message transfer method, so the master must not provide transfer
+ * methods of its own. The caller should also set up the bus number and
+ * number of chipselects.
+ *
+ * For i/o loops, provide callbacks either per-word (for bitbanging, or for
+ * hardware that basically exposes a shift register) or per-spi_transfer
+ * (which takes better advantage of hardware like fifos or DMA engines).
+ *
+ * Drivers using per-word I/O loops should use (or call) spi_bitbang_setup,
+ * spi_bitbang_cleanup and spi_bitbang_setup_transfer to handle those spi
+ * master methods. Those methods are the defaults if the bitbang->txrx_bufs
+ * routine isn't initialized.
+ *
+ * This routine registers the spi_master, which will process requests in a
+ * dedicated task, keeping IRQs unblocked most of the time. To stop
+ * processing those requests, call spi_bitbang_stop().
+ *
+ * On success, this routine will take a reference to master. The caller is
+ * responsible for calling spi_bitbang_stop() to decrement the reference and
+ * spi_master_put() as counterpart of spi_alloc_master() to prevent a memory
+ * leak.
+ */
+int spi_bitbang_start(struct spi_bitbang *bitbang)
+{
+ struct spi_master *master = bitbang->master;
+ int ret;
+
+ if (!master || !bitbang->chipselect)
+ return -EINVAL;
+
+ spin_lock_init(&bitbang->lock);
+
+ if (!master->mode_bits)
+ master->mode_bits = SPI_CPOL | SPI_CPHA | bitbang->flags;
+
+ if (master->transfer || master->transfer_one_message)
+ return -EINVAL;
+
+ master->prepare_transfer_hardware = spi_bitbang_prepare_hardware;
+ master->unprepare_transfer_hardware = spi_bitbang_unprepare_hardware;
+ master->transfer_one_message = spi_bitbang_transfer_one;
+
+ if (!bitbang->txrx_bufs) {
+ bitbang->use_dma = 0;
+ bitbang->txrx_bufs = spi_bitbang_bufs;
+ if (!master->setup) {
+ if (!bitbang->setup_transfer)
+ bitbang->setup_transfer =
+ spi_bitbang_setup_transfer;
+ master->setup = spi_bitbang_setup;
+ master->cleanup = spi_bitbang_cleanup;
+ }
+ }
+
+ /* driver may get busy before register() returns, especially
+ * if someone registered boardinfo for devices
+ */
+ ret = spi_register_master(spi_master_get(master));
+ if (ret)
+ spi_master_put(master);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_start);
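+
+/*
+ * Typical use (an illustrative sketch only -- my_chipselect and
+ * my_txrx_word_mode0 are hypothetical; see spi-butterfly.c for a
+ * complete glue driver):
+ *
+ *	master = spi_alloc_master(dev, sizeof(*bb));
+ *	bb = spi_master_get_devdata(master);
+ *	master->bus_num = 42;
+ *	master->num_chipselect = 1;
+ *	bb->bitbang.master = master;
+ *	bb->bitbang.chipselect = my_chipselect;
+ *	bb->bitbang.txrx_word[SPI_MODE_0] = my_txrx_word_mode0;
+ *	err = spi_bitbang_start(&bb->bitbang);
+ *
+ * and on teardown call spi_bitbang_stop(&bb->bitbang) followed by
+ * spi_master_put(master).
+ */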
+
+/**
+ * spi_bitbang_stop - stops the task providing spi communication
+ */
+void spi_bitbang_stop(struct spi_bitbang *bitbang)
+{
+ spi_unregister_master(bitbang->master);
+}
+EXPORT_SYMBOL_GPL(spi_bitbang_stop);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/spi/spi-butterfly.c b/drivers/spi/spi-butterfly.c
new file mode 100644
index 000000000..9a9586298
--- /dev/null
+++ b/drivers/spi/spi-butterfly.c
@@ -0,0 +1,351 @@
+/*
+ * parport-to-butterfly adapter
+ *
+ * Copyright (C) 2005 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/parport.h>
+
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/flash.h>
+
+#include <linux/mtd/partitions.h>
+
+
+/*
+ * This uses SPI to talk with an "AVR Butterfly", which is a $US20 card
+ * with a battery powered AVR microcontroller and lots of goodies. You
+ * can use GCC to develop firmware for this.
+ *
+ * See Documentation/spi/butterfly for information about how to build
+ * and use this custom parallel port cable.
+ */
+
+
+/* DATA output bits (pins 2..9 == D0..D7) */
+#define butterfly_nreset (1 << 1) /* pin 3 */
+
+#define spi_sck_bit (1 << 0) /* pin 2 */
+#define spi_mosi_bit (1 << 7) /* pin 9 */
+
+#define vcc_bits ((1 << 6) | (1 << 5)) /* pins 7, 8 */
+
+/* STATUS input bits */
+#define spi_miso_bit PARPORT_STATUS_BUSY /* pin 11 */
+
+/* CONTROL output bits */
+#define spi_cs_bit PARPORT_CONTROL_SELECT /* pin 17 */
+
+
+
+static inline struct butterfly *spidev_to_pp(struct spi_device *spi)
+{
+ return spi->controller_data;
+}
+
+
+struct butterfly {
+ /* REVISIT ... for now, this must be first */
+ struct spi_bitbang bitbang;
+
+ struct parport *port;
+ struct pardevice *pd;
+
+ u8 lastbyte;
+
+ struct spi_device *dataflash;
+ struct spi_device *butterfly;
+ struct spi_board_info info[2];
+
+};
+
+/*----------------------------------------------------------------------*/
+
+static inline void
+setsck(struct spi_device *spi, int is_on)
+{
+ struct butterfly *pp = spidev_to_pp(spi);
+ u8 bit, byte = pp->lastbyte;
+
+ bit = spi_sck_bit;
+
+ if (is_on)
+ byte |= bit;
+ else
+ byte &= ~bit;
+ parport_write_data(pp->port, byte);
+ pp->lastbyte = byte;
+}
+
+static inline void
+setmosi(struct spi_device *spi, int is_on)
+{
+ struct butterfly *pp = spidev_to_pp(spi);
+ u8 bit, byte = pp->lastbyte;
+
+ bit = spi_mosi_bit;
+
+ if (is_on)
+ byte |= bit;
+ else
+ byte &= ~bit;
+ parport_write_data(pp->port, byte);
+ pp->lastbyte = byte;
+}
+
+static inline int getmiso(struct spi_device *spi)
+{
+ struct butterfly *pp = spidev_to_pp(spi);
+ int value;
+ u8 bit;
+
+ bit = spi_miso_bit;
+
+ /* only STATUS_BUSY is NOT negated */
+ value = !(parport_read_status(pp->port) & bit);
+ return (bit == PARPORT_STATUS_BUSY) ? value : !value;
+}
+
+static void butterfly_chipselect(struct spi_device *spi, int value)
+{
+ struct butterfly *pp = spidev_to_pp(spi);
+
+ /* set default clock polarity */
+ if (value != BITBANG_CS_INACTIVE)
+ setsck(spi, spi->mode & SPI_CPOL);
+
+ /* here, value == "activate or not";
+ * most PARPORT_CONTROL_* bits are negated, so we must
+ * morph it to value == "bit value to write in control register"
+ */
+ if (spi_cs_bit == PARPORT_CONTROL_INIT)
+ value = !value;
+
+ parport_frob_control(pp->port, spi_cs_bit, value ? spi_cs_bit : 0);
+}
+
+
+/* we only need to implement one mode here, and we choose SPI_MODE_0 */
+
+#define spidelay(X) do { } while (0)
+/* #define spidelay ndelay */
+
+#include "spi-bitbang-txrx.h"
+
+static u32
+butterfly_txrx_word_mode0(struct spi_device *spi,
+ unsigned nsecs,
+ u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+}
+
+/*----------------------------------------------------------------------*/
+
+/* override default partitioning with cmdlinepart */
+static struct mtd_partition partitions[] = { {
+ /* JFFS2 wants partitions of 4*N blocks for this device,
+ * so sectors 0 and 1 can't be partitions by themselves.
+ */
+
+ /* sector 0 = 8 pages * 264 bytes/page (1 block)
+ * sector 1 = 248 pages * 264 bytes/page
+ */
+ .name = "bookkeeping", /* 66 KB */
+ .offset = 0,
+ .size = (8 + 248) * 264,
+ /* .mask_flags = MTD_WRITEABLE, */
+}, {
+ /* sector 2 = 256 pages * 264 bytes/page
+ * sectors 3-5 = 512 pages * 264 bytes/page
+ */
+ .name = "filesystem", /* 462 KB */
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+} };
+
+static struct flash_platform_data flash = {
+ .name = "butterflash",
+ .parts = partitions,
+ .nr_parts = ARRAY_SIZE(partitions),
+};
+
+
+/* REVISIT remove this ugly global and its "only one" limitation */
+static struct butterfly *butterfly;
+
+static void butterfly_attach(struct parport *p)
+{
+ struct pardevice *pd;
+ int status;
+ struct butterfly *pp;
+ struct spi_master *master;
+ struct device *dev = p->physport->dev;
+
+ if (butterfly || !dev)
+ return;
+
+ /* REVISIT: this just _assumes_ a butterfly is there ... no probe,
+ * and no way to be selective about what it binds to.
+ */
+
+ master = spi_alloc_master(dev, sizeof(*pp));
+ if (!master) {
+ status = -ENOMEM;
+ goto done;
+ }
+ pp = spi_master_get_devdata(master);
+
+ /*
+ * SPI and bitbang hookup
+ *
+ * use default setup(), cleanup(), and transfer() methods; and
+ * only bother implementing mode 0. Start it later.
+ */
+ master->bus_num = 42;
+ master->num_chipselect = 2;
+
+ pp->bitbang.master = master;
+ pp->bitbang.chipselect = butterfly_chipselect;
+ pp->bitbang.txrx_word[SPI_MODE_0] = butterfly_txrx_word_mode0;
+
+ /*
+ * parport hookup
+ */
+ pp->port = p;
+ pd = parport_register_device(p, "spi_butterfly",
+ NULL, NULL, NULL,
+ 0 /* FLAGS */, pp);
+ if (!pd) {
+ status = -ENOMEM;
+ goto clean0;
+ }
+ pp->pd = pd;
+
+ status = parport_claim(pd);
+ if (status < 0)
+ goto clean1;
+
+ /*
+ * Butterfly reset, powerup, run firmware
+ */
+ pr_debug("%s: powerup/reset Butterfly\n", p->name);
+
+ /* nCS for dataflash (this bit is inverted on output) */
+ parport_frob_control(pp->port, spi_cs_bit, 0);
+
+ /* stabilize power with chip in reset (nRESET), and
+ * spi_sck_bit clear (CPOL=0)
+ */
+ pp->lastbyte |= vcc_bits;
+ parport_write_data(pp->port, pp->lastbyte);
+ msleep(5);
+
+ /* take it out of reset; assume long reset delay */
+ pp->lastbyte |= butterfly_nreset;
+ parport_write_data(pp->port, pp->lastbyte);
+ msleep(100);
+
+
+ /*
+ * Start SPI ... for now, hide that we're two physical busses.
+ */
+ status = spi_bitbang_start(&pp->bitbang);
+ if (status < 0)
+ goto clean2;
+
+ /* Bus 1 lets us talk to at45db041b (firmware disables AVR SPI), AVR
+ * (firmware resets at45, acts as spi slave) or neither (we ignore
+ * both, AVR uses AT45). Here we expect firmware for the first option.
+ */
+
+ pp->info[0].max_speed_hz = 15 * 1000 * 1000;
+ strcpy(pp->info[0].modalias, "mtd_dataflash");
+ pp->info[0].platform_data = &flash;
+ pp->info[0].chip_select = 1;
+ pp->info[0].controller_data = pp;
+ pp->dataflash = spi_new_device(pp->bitbang.master, &pp->info[0]);
+ if (pp->dataflash)
+ pr_debug("%s: dataflash at %s\n", p->name,
+ dev_name(&pp->dataflash->dev));
+
+ pr_info("%s: AVR Butterfly\n", p->name);
+ butterfly = pp;
+ return;
+
+clean2:
+ /* turn off VCC */
+ parport_write_data(pp->port, 0);
+
+ parport_release(pp->pd);
+clean1:
+ parport_unregister_device(pd);
+clean0:
+ (void) spi_master_put(pp->bitbang.master);
+done:
+ pr_debug("%s: butterfly probe, fail %d\n", p->name, status);
+}
+
+static void butterfly_detach(struct parport *p)
+{
+ struct butterfly *pp;
+
+ /* FIXME this global is ugly ... but, how to quickly get from
+ * the parport to the "struct butterfly" associated with it?
+ * "old school" driver-internal device lists?
+ */
+ if (!butterfly || butterfly->port != p)
+ return;
+ pp = butterfly;
+ butterfly = NULL;
+
+ /* stop() unregisters child devices too */
+ spi_bitbang_stop(&pp->bitbang);
+
+ /* turn off VCC */
+ parport_write_data(pp->port, 0);
+ msleep(10);
+
+ parport_release(pp->pd);
+ parport_unregister_device(pp->pd);
+
+ (void) spi_master_put(pp->bitbang.master);
+}
+
+static struct parport_driver butterfly_driver = {
+ .name = "spi_butterfly",
+ .attach = butterfly_attach,
+ .detach = butterfly_detach,
+};
+
+
+static int __init butterfly_init(void)
+{
+ return parport_register_driver(&butterfly_driver);
+}
+device_initcall(butterfly_init);
+
+static void __exit butterfly_exit(void)
+{
+ parport_unregister_driver(&butterfly_driver);
+}
+module_exit(butterfly_exit);
+
+MODULE_DESCRIPTION("Parport Adapter driver for AVR Butterfly");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-cadence.c b/drivers/spi/spi-cadence.c
new file mode 100644
index 000000000..5a6749881
--- /dev/null
+++ b/drivers/spi/spi-cadence.c
@@ -0,0 +1,692 @@
+/*
+ * Cadence SPI controller driver (master mode only)
+ *
+ * Copyright (C) 2008 - 2014 Xilinx, Inc.
+ *
+ * based on Blackfin On-Chip SPI Driver (spi_bfin5xx.c)
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+
+/* Name of this driver */
+#define CDNS_SPI_NAME "cdns-spi"
+
+/* Register offset definitions */
+#define CDNS_SPI_CR_OFFSET 0x00 /* Configuration Register, RW */
+#define CDNS_SPI_ISR_OFFSET 0x04 /* Interrupt Status Register, RO */
+#define CDNS_SPI_IER_OFFSET 0x08 /* Interrupt Enable Register, WO */
+#define CDNS_SPI_IDR_OFFSET 0x0c /* Interrupt Disable Register, WO */
+#define CDNS_SPI_IMR_OFFSET 0x10 /* Interrupt Enabled Mask Register, RO */
+#define CDNS_SPI_ER_OFFSET 0x14 /* Enable/Disable Register, RW */
+#define CDNS_SPI_DR_OFFSET 0x18 /* Delay Register, RW */
+#define CDNS_SPI_TXD_OFFSET 0x1C /* Data Transmit Register, WO */
+#define CDNS_SPI_RXD_OFFSET 0x20 /* Data Receive Register, RO */
+#define CDNS_SPI_SICR_OFFSET 0x24 /* Slave Idle Count Register, RW */
+#define CDNS_SPI_THLD_OFFSET	0x28 /* Transmit FIFO Watermark Register, RW */
+
+/*
+ * SPI Configuration Register bit Masks
+ *
+ * This register contains various control bits that affect the operation
+ * of the SPI controller
+ */
+#define CDNS_SPI_CR_MANSTRT_MASK 0x00010000 /* Manual TX Start */
+#define CDNS_SPI_CR_CPHA_MASK 0x00000004 /* Clock Phase Control */
+#define CDNS_SPI_CR_CPOL_MASK 0x00000002 /* Clock Polarity Control */
+#define CDNS_SPI_CR_SSCTRL_MASK 0x00003C00 /* Slave Select Mask */
+#define CDNS_SPI_CR_PERI_SEL_MASK 0x00000200 /* Peripheral Select Decode */
+#define CDNS_SPI_CR_BAUD_DIV_MASK 0x00000038 /* Baud Rate Divisor Mask */
+#define CDNS_SPI_CR_MSTREN_MASK 0x00000001 /* Master Enable Mask */
+#define CDNS_SPI_CR_MANSTRTEN_MASK 0x00008000 /* Manual TX Enable Mask */
+#define CDNS_SPI_CR_SSFORCE_MASK 0x00004000 /* Manual SS Enable Mask */
+#define CDNS_SPI_CR_BAUD_DIV_4_MASK 0x00000008 /* Default Baud Div Mask */
+#define CDNS_SPI_CR_DEFAULT_MASK (CDNS_SPI_CR_MSTREN_MASK | \
+ CDNS_SPI_CR_SSCTRL_MASK | \
+ CDNS_SPI_CR_SSFORCE_MASK | \
+ CDNS_SPI_CR_BAUD_DIV_4_MASK)
+
+/*
+ * SPI Configuration Register - Baud rate and slave select
+ *
+ * These are the values used in the calculation of baud rate divisor and
+ * setting the slave select.
+ */
+
+#define CDNS_SPI_BAUD_DIV_MAX 7 /* Baud rate divisor maximum */
+#define CDNS_SPI_BAUD_DIV_MIN 1 /* Baud rate divisor minimum */
+#define CDNS_SPI_BAUD_DIV_SHIFT 3 /* Baud rate divisor shift in CR */
+#define CDNS_SPI_SS_SHIFT 10 /* Slave Select field shift in CR */
+#define CDNS_SPI_SS0 0x1 /* Slave Select zero */
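+
+/*
+ * Assuming the usual Cadence relation f_spi = f_ref / 2^(div + 1) for a
+ * divisor field value div in [1, 7]: with a 100 MHz reference clock and a
+ * requested 10 MHz transfer, div = 3 is the first value that does not
+ * overshoot, giving 100 MHz / 16 = 6.25 MHz on the wire.
+ */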
+
+/*
+ * SPI Interrupt Registers bit Masks
+ *
+ * All the four interrupt registers (Status/Mask/Enable/Disable) have the same
+ * bit definitions.
+ */
+#define CDNS_SPI_IXR_TXOW_MASK 0x00000004 /* SPI TX FIFO Overwater */
+#define CDNS_SPI_IXR_MODF_MASK 0x00000002 /* SPI Mode Fault */
+#define CDNS_SPI_IXR_RXNEMTY_MASK 0x00000010 /* SPI RX FIFO Not Empty */
+#define CDNS_SPI_IXR_DEFAULT_MASK (CDNS_SPI_IXR_TXOW_MASK | \
+ CDNS_SPI_IXR_MODF_MASK)
+#define CDNS_SPI_IXR_TXFULL_MASK 0x00000008 /* SPI TX Full */
+#define CDNS_SPI_IXR_ALL_MASK 0x0000007F /* SPI all interrupts */
+
+/*
+ * SPI Enable Register bit Masks
+ *
+ * This register is used to enable or disable the SPI controller
+ */
+#define CDNS_SPI_ER_ENABLE_MASK 0x00000001 /* SPI Enable Bit Mask */
+#define CDNS_SPI_ER_DISABLE_MASK 0x0 /* SPI Disable Bit Mask */
+
+/* SPI FIFO depth in bytes */
+#define CDNS_SPI_FIFO_DEPTH 128
+
+/* Default number of chip select lines */
+#define CDNS_SPI_DEFAULT_NUM_CS 4
+
+/**
+ * struct cdns_spi - This definition defines spi driver instance
+ * @regs: Virtual address of the SPI controller registers
+ * @ref_clk: Pointer to the peripheral clock
+ * @pclk: Pointer to the APB clock
+ * @speed_hz: Current SPI bus clock speed in Hz
+ * @txbuf: Pointer to the TX buffer
+ * @rxbuf: Pointer to the RX buffer
+ * @tx_bytes: Number of bytes left to transfer
+ * @rx_bytes: Number of bytes requested
+ * @dev_busy: Device busy flag
+ * @is_decoded_cs: Flag for decoder property set or not
+ */
+struct cdns_spi {
+ void __iomem *regs;
+ struct clk *ref_clk;
+ struct clk *pclk;
+ u32 speed_hz;
+ const u8 *txbuf;
+ u8 *rxbuf;
+ int tx_bytes;
+ int rx_bytes;
+ u8 dev_busy;
+ u32 is_decoded_cs;
+};
+
+/* Helpers for SPI controller register read/write */
+static inline u32 cdns_spi_read(struct cdns_spi *xspi, u32 offset)
+{
+ return readl_relaxed(xspi->regs + offset);
+}
+
+static inline void cdns_spi_write(struct cdns_spi *xspi, u32 offset, u32 val)
+{
+ writel_relaxed(val, xspi->regs + offset);
+}
+
+/**
+ * cdns_spi_init_hw - Initialize the hardware and configure the SPI controller
+ * @xspi: Pointer to the cdns_spi structure
+ *
+ * On reset the SPI controller is configured to be in master mode, the baud
+ * rate divisor is set to 4, the threshold for the TX FIFO not-full interrupt
+ * is set to 1, and the word size is 8 bits.
+ * This function initializes the SPI controller to disable and clear all the
+ * interrupts, enable manual slave select and manual start, deselect all the
+ * chip select lines, and enable the SPI controller.
+ */
+static void cdns_spi_init_hw(struct cdns_spi *xspi)
+{
+ u32 ctrl_reg = CDNS_SPI_CR_DEFAULT_MASK;
+
+ if (xspi->is_decoded_cs)
+ ctrl_reg |= CDNS_SPI_CR_PERI_SEL_MASK;
+
+ cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+ CDNS_SPI_ER_DISABLE_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
+ CDNS_SPI_IXR_ALL_MASK);
+
+ /* Clear the RX FIFO */
+ while (cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET) &
+ CDNS_SPI_IXR_RXNEMTY_MASK)
+ cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
+
+ cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET,
+ CDNS_SPI_IXR_ALL_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
+ cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+ CDNS_SPI_ER_ENABLE_MASK);
+}
+
+/**
+ * cdns_spi_chipselect - Select or deselect the chip select line
+ * @spi: Pointer to the spi_device structure
+ * @is_high: Deselect (true) or select (false) the chip select line
+ */
+static void cdns_spi_chipselect(struct spi_device *spi, bool is_high)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ctrl_reg;
+
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+
+ if (is_high) {
+ /* Deselect the slave */
+ ctrl_reg |= CDNS_SPI_CR_SSCTRL_MASK;
+ } else {
+ /* Select the slave */
+ ctrl_reg &= ~CDNS_SPI_CR_SSCTRL_MASK;
+ if (!(xspi->is_decoded_cs))
+ ctrl_reg |= ((~(CDNS_SPI_SS0 << spi->chip_select)) <<
+ CDNS_SPI_SS_SHIFT) &
+ CDNS_SPI_CR_SSCTRL_MASK;
+ else
+ ctrl_reg |= (spi->chip_select << CDNS_SPI_SS_SHIFT) &
+ CDNS_SPI_CR_SSCTRL_MASK;
+ }
+
+ cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
+}
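+
+/*
+ * For illustration (assuming the 4-bit slave select field starting at
+ * CDNS_SPI_SS_SHIFT that the masks above describe): without an external
+ * decoder, chip_select = 1 produces the active-low one-hot pattern
+ * ~(0x1 << 1) = ...1101b in the field, driving only slave select 1 low;
+ * with is_decoded_cs set, the raw value 1 (0001b) is written instead, to
+ * be expanded by an external decoder.
+ */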
+
+/**
+ * cdns_spi_config_clock_mode - Sets clock polarity and phase
+ * @spi: Pointer to the spi_device structure
+ *
+ * Sets the requested clock polarity and phase.
+ */
+static void cdns_spi_config_clock_mode(struct spi_device *spi)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ctrl_reg, new_ctrl_reg;
+
+ new_ctrl_reg = ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+
+ /* Set the SPI clock phase and clock polarity */
+ new_ctrl_reg &= ~(CDNS_SPI_CR_CPHA_MASK | CDNS_SPI_CR_CPOL_MASK);
+ if (spi->mode & SPI_CPHA)
+ new_ctrl_reg |= CDNS_SPI_CR_CPHA_MASK;
+ if (spi->mode & SPI_CPOL)
+ new_ctrl_reg |= CDNS_SPI_CR_CPOL_MASK;
+
+ if (new_ctrl_reg != ctrl_reg) {
+ /*
+ * Just writing the CR register does not seem to apply the clock
+ * setting changes. This is problematic when changing the clock
+ * polarity as it will cause the SPI slave to see spurious clock
+ * transitions. To workaround the issue toggle the ER register.
+ */
+ cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+ CDNS_SPI_ER_DISABLE_MASK);
+ cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, new_ctrl_reg);
+ cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+ CDNS_SPI_ER_ENABLE_MASK);
+ }
+}
+
+/**
+ * cdns_spi_config_clock_freq - Sets clock frequency
+ * @spi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer setup parameters
+ *
+ * Sets the requested clock frequency.
+ * Note: If the requested frequency is not an exact match for what the
+ * prescaler can produce, the driver sets the closest achievable frequency
+ * below the requested one for the transfer. If the requested frequency is
+ * outside the range the SPI controller supports, the driver sets the
+ * highest or lowest frequency the controller supports.
+ */
+static void cdns_spi_config_clock_freq(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+ u32 ctrl_reg, baud_rate_val;
+ unsigned long frequency;
+
+ frequency = clk_get_rate(xspi->ref_clk);
+
+ ctrl_reg = cdns_spi_read(xspi, CDNS_SPI_CR_OFFSET);
+
+ /* Set the clock frequency */
+ if (xspi->speed_hz != transfer->speed_hz) {
+ /* first valid value is 1 */
+ baud_rate_val = CDNS_SPI_BAUD_DIV_MIN;
+ while ((baud_rate_val < CDNS_SPI_BAUD_DIV_MAX) &&
+ (frequency / (2 << baud_rate_val)) > transfer->speed_hz)
+ baud_rate_val++;
+
+ ctrl_reg &= ~CDNS_SPI_CR_BAUD_DIV_MASK;
+ ctrl_reg |= baud_rate_val << CDNS_SPI_BAUD_DIV_SHIFT;
+
+ xspi->speed_hz = frequency / (2 << baud_rate_val);
+ }
+ cdns_spi_write(xspi, CDNS_SPI_CR_OFFSET, ctrl_reg);
+}
+
+/**
+ * cdns_spi_setup_transfer - Configure SPI controller for specified transfer
+ * @spi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer setup parameters
+ *
+ * Sets the operational mode of SPI controller for the next SPI transfer and
+ * sets the requested clock frequency.
+ *
+ * Return: Always 0
+ */
+static int cdns_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(spi->master);
+
+ cdns_spi_config_clock_freq(spi, transfer);
+
+ dev_dbg(&spi->dev, "%s, mode %d, %u bits/w, %u clock speed\n",
+ __func__, spi->mode, spi->bits_per_word,
+ xspi->speed_hz);
+
+ return 0;
+}
+
+/**
+ * cdns_spi_fill_tx_fifo - Fills the TX FIFO with as many bytes as possible
+ * @xspi: Pointer to the cdns_spi structure
+ */
+static void cdns_spi_fill_tx_fifo(struct cdns_spi *xspi)
+{
+ unsigned long trans_cnt = 0;
+
+ while ((trans_cnt < CDNS_SPI_FIFO_DEPTH) &&
+ (xspi->tx_bytes > 0)) {
+ if (xspi->txbuf)
+ cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET,
+ *xspi->txbuf++);
+ else
+ cdns_spi_write(xspi, CDNS_SPI_TXD_OFFSET, 0);
+
+ xspi->tx_bytes--;
+ trans_cnt++;
+ }
+}
+
+/**
+ * cdns_spi_irq - Interrupt service routine of the SPI controller
+ * @irq: IRQ number
+ * @dev_id: Pointer to the xspi structure
+ *
+ * This function handles TX empty and Mode Fault interrupts only.
+ * On a TX empty interrupt this function reads the received data from the RX
+ * FIFO and refills the TX FIFO if there is any data remaining to be
+ * transferred.
+ * On a Mode Fault interrupt this function finalizes the transfer; the SPI
+ * core will identify the error because the count of bytes remaining to be
+ * transferred is non-zero.
+ *
+ * Return: IRQ_HANDLED when handled; IRQ_NONE otherwise.
+ */
+static irqreturn_t cdns_spi_irq(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+ u32 intr_status, status;
+
+ status = IRQ_NONE;
+ intr_status = cdns_spi_read(xspi, CDNS_SPI_ISR_OFFSET);
+ cdns_spi_write(xspi, CDNS_SPI_ISR_OFFSET, intr_status);
+
+ if (intr_status & CDNS_SPI_IXR_MODF_MASK) {
+		/*
+		 * Indicate that the transfer is completed; the SPI core will
+		 * identify the error because the count of bytes remaining to
+		 * be transferred is non-zero.
+		 */
+ cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
+ CDNS_SPI_IXR_DEFAULT_MASK);
+ spi_finalize_current_transfer(master);
+ status = IRQ_HANDLED;
+ } else if (intr_status & CDNS_SPI_IXR_TXOW_MASK) {
+ unsigned long trans_cnt;
+
+ trans_cnt = xspi->rx_bytes - xspi->tx_bytes;
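+ /*
+ * cdns_spi_fill_tx_fifo() decremented tx_bytes as bytes were
+ * queued while rx_bytes still counts them, so the difference is
+ * the number of bytes expected to be waiting in the RX FIFO by
+ * the time this interrupt fires.
+ */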
+
+ /* Read out the data from the RX FIFO */
+ while (trans_cnt) {
+ u8 data;
+
+ data = cdns_spi_read(xspi, CDNS_SPI_RXD_OFFSET);
+ if (xspi->rxbuf)
+ *xspi->rxbuf++ = data;
+
+ xspi->rx_bytes--;
+ trans_cnt--;
+ }
+
+ if (xspi->tx_bytes) {
+ /* There is more data to send */
+ cdns_spi_fill_tx_fifo(xspi);
+ } else {
+ /* Transfer is completed */
+ cdns_spi_write(xspi, CDNS_SPI_IDR_OFFSET,
+ CDNS_SPI_IXR_DEFAULT_MASK);
+ spi_finalize_current_transfer(master);
+ }
+ status = IRQ_HANDLED;
+ }
+
+ return status;
+}
+
+static int cdns_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ cdns_spi_config_clock_mode(msg->spi);
+ return 0;
+}
+
+/**
+ * cdns_transfer_one - Initiates the SPI transfer
+ * @master: Pointer to spi_master structure
+ * @spi: Pointer to the spi_device structure
+ * @transfer: Pointer to the spi_transfer structure which provides
+ * information about next transfer parameters
+ *
+ * This function fills the TX FIFO, starts the SPI transfer and returns a
+ * positive transfer count so that the core knows the transfer is still in
+ * progress and waits for completion from the interrupt handler.
+ *
+ * Return: transfer->len, the (always positive) length of the transfer
+ */
+static int cdns_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ xspi->txbuf = transfer->tx_buf;
+ xspi->rxbuf = transfer->rx_buf;
+ xspi->tx_bytes = transfer->len;
+ xspi->rx_bytes = transfer->len;
+
+ cdns_spi_setup_transfer(spi, transfer);
+
+ cdns_spi_fill_tx_fifo(xspi);
+
+ cdns_spi_write(xspi, CDNS_SPI_IER_OFFSET,
+ CDNS_SPI_IXR_DEFAULT_MASK);
+ return transfer->len;
+}
+
+/**
+ * cdns_prepare_transfer_hardware - Prepares hardware for transfer.
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function enables SPI master controller.
+ *
+ * Return: 0 always
+ */
+static int cdns_prepare_transfer_hardware(struct spi_master *master)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+ CDNS_SPI_ER_ENABLE_MASK);
+
+ return 0;
+}
+
+/**
+ * cdns_unprepare_transfer_hardware - Relaxes hardware after transfer
+ * @master: Pointer to the spi_master structure which provides
+ * information about the controller.
+ *
+ * This function disables the SPI master controller.
+ *
+ * Return: 0 always
+ */
+static int cdns_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+ CDNS_SPI_ER_DISABLE_MASK);
+
+ return 0;
+}
+
+/**
+ * cdns_spi_probe - Probe method for the SPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function initializes the driver data structures and the hardware.
+ *
+ * Return: 0 on success and error value on error
+ */
+static int cdns_spi_probe(struct platform_device *pdev)
+{
+ int ret = 0, irq;
+ struct spi_master *master;
+ struct cdns_spi *xspi;
+ struct resource *res;
+ u32 num_cs;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*xspi));
+ if (master == NULL)
+ return -ENOMEM;
+
+ xspi = spi_master_get_devdata(master);
+ master->dev.of_node = pdev->dev.of_node;
+ platform_set_drvdata(pdev, master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xspi->regs)) {
+ ret = PTR_ERR(xspi->regs);
+ goto remove_master;
+ }
+
+ xspi->pclk = devm_clk_get(&pdev->dev, "pclk");
+ if (IS_ERR(xspi->pclk)) {
+ dev_err(&pdev->dev, "pclk clock not found.\n");
+ ret = PTR_ERR(xspi->pclk);
+ goto remove_master;
+ }
+
+ xspi->ref_clk = devm_clk_get(&pdev->dev, "ref_clk");
+ if (IS_ERR(xspi->ref_clk)) {
+ dev_err(&pdev->dev, "ref_clk clock not found.\n");
+ ret = PTR_ERR(xspi->ref_clk);
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xspi->pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable APB clock.\n");
+ goto remove_master;
+ }
+
+ ret = clk_prepare_enable(xspi->ref_clk);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to enable device clock.\n");
+ goto clk_dis_apb;
+ }
+
+ ret = of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+ if (ret < 0)
+ master->num_chipselect = CDNS_SPI_DEFAULT_NUM_CS;
+ else
+ master->num_chipselect = num_cs;
+
+ ret = of_property_read_u32(pdev->dev.of_node, "is-decoded-cs",
+ &xspi->is_decoded_cs);
+ if (ret < 0)
+ xspi->is_decoded_cs = 0;
+
+ /* SPI controller initializations */
+ cdns_spi_init_hw(xspi);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0) {
+ ret = -ENXIO;
+ dev_err(&pdev->dev, "irq number is invalid\n");
+		goto clk_dis_all;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, cdns_spi_irq,
+ 0, pdev->name, master);
+ if (ret != 0) {
+		dev_err(&pdev->dev, "request_irq failed\n");
+		goto clk_dis_all;
+ }
+
+ master->prepare_transfer_hardware = cdns_prepare_transfer_hardware;
+ master->prepare_message = cdns_prepare_message;
+ master->transfer_one = cdns_transfer_one;
+ master->unprepare_transfer_hardware = cdns_unprepare_transfer_hardware;
+ master->set_cs = cdns_spi_chipselect;
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+
+ /* Set to default valid value */
+ master->max_speed_hz = clk_get_rate(xspi->ref_clk) / 4;
+ xspi->speed_hz = master->max_speed_hz;
+
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+
+ ret = spi_register_master(master);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_register_master failed\n");
+ goto clk_dis_all;
+ }
+
+ return ret;
+
+clk_dis_all:
+ clk_disable_unprepare(xspi->ref_clk);
+clk_dis_apb:
+ clk_disable_unprepare(xspi->pclk);
+remove_master:
+ spi_master_put(master);
+ return ret;
+}
+
+/**
+ * cdns_spi_remove - Remove method for the SPI driver
+ * @pdev: Pointer to the platform_device structure
+ *
+ * This function is called if a device is physically removed from the system or
+ * if the driver module is being unloaded. It frees all resources allocated to
+ * the device.
+ *
+ * Return: Always 0
+ */
+static int cdns_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+	spi_unregister_master(master);
+
+	cdns_spi_write(xspi, CDNS_SPI_ER_OFFSET,
+		       CDNS_SPI_ER_DISABLE_MASK);
+
+	clk_disable_unprepare(xspi->ref_clk);
+	clk_disable_unprepare(xspi->pclk);
+
+ return 0;
+}
+
+/**
+ * cdns_spi_suspend - Suspend method for the SPI driver
+ * @dev: Address of the device structure
+ *
+ * This function disables the SPI controller and
+ * changes the driver state to "suspend"
+ *
+ * Return: Always 0
+ */
+static int __maybe_unused cdns_spi_suspend(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct cdns_spi *xspi = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+
+	clk_disable_unprepare(xspi->ref_clk);
+	clk_disable_unprepare(xspi->pclk);
+
+ return 0;
+}
+
+/**
+ * cdns_spi_resume - Resume method for the SPI driver
+ * @dev: Address of the device structure
+ *
+ * This function changes the driver state to "ready"
+ *
+ * Return: 0 on success and error value on error
+ */
+static int __maybe_unused cdns_spi_resume(struct device *dev)
+{
+	struct spi_master *master = dev_get_drvdata(dev);
+	struct cdns_spi *xspi = spi_master_get_devdata(master);
+ int ret = 0;
+
+ ret = clk_prepare_enable(xspi->pclk);
+ if (ret) {
+ dev_err(dev, "Cannot enable APB clock.\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(xspi->ref_clk);
+ if (ret) {
+ dev_err(dev, "Cannot enable device clock.\n");
+		clk_disable_unprepare(xspi->pclk);
+ return ret;
+ }
+ spi_master_resume(master);
+
+ return 0;
+}
+
+static SIMPLE_DEV_PM_OPS(cdns_spi_dev_pm_ops, cdns_spi_suspend,
+ cdns_spi_resume);
+
+static const struct of_device_id cdns_spi_of_match[] = {
+ { .compatible = "xlnx,zynq-spi-r1p6" },
+ { .compatible = "cdns,spi-r1p6" },
+ { /* end of table */ }
+};
+MODULE_DEVICE_TABLE(of, cdns_spi_of_match);
+
+/* cdns_spi_driver - This structure defines the SPI subsystem platform driver */
+static struct platform_driver cdns_spi_driver = {
+ .probe = cdns_spi_probe,
+ .remove = cdns_spi_remove,
+ .driver = {
+ .name = CDNS_SPI_NAME,
+ .of_match_table = cdns_spi_of_match,
+ .pm = &cdns_spi_dev_pm_ops,
+ },
+};
+
+module_platform_driver(cdns_spi_driver);
+
+MODULE_AUTHOR("Xilinx, Inc.");
+MODULE_DESCRIPTION("Cadence SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-clps711x.c b/drivers/spi/spi-clps711x.c
new file mode 100644
index 000000000..8c30de031
--- /dev/null
+++ b/drivers/spi/spi-clps711x.c
@@ -0,0 +1,214 @@
+/*
+ * CLPS711X SPI bus driver
+ *
+ * Copyright (C) 2012-2014 Alexander Shiyan <shc_work@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/mfd/syscon.h>
+#include <linux/mfd/syscon/clps711x.h>
+#include <linux/spi/spi.h>
+#include <linux/platform_data/spi-clps711x.h>
+
+#define DRIVER_NAME "spi-clps711x"
+
+#define SYNCIO_FRMLEN(x) ((x) << 8)
+#define SYNCIO_TXFRMEN (1 << 14)
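+
+/*
+ * The SYNCIO word written in this driver combines data, frame length and
+ * the TX frame enable bit; for example, sending the byte 0xa5 as an 8-bit
+ * frame writes 0xa5 | SYNCIO_FRMLEN(8) | SYNCIO_TXFRMEN = 0x48a5.
+ */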
+
+struct spi_clps711x_data {
+ void __iomem *syncio;
+ struct regmap *syscon;
+ struct clk *spi_clk;
+
+ u8 *tx_buf;
+ u8 *rx_buf;
+ unsigned int bpw;
+ int len;
+};
+
+static int spi_clps711x_setup(struct spi_device *spi)
+{
+	/* We expect that the SPI device is not selected */
+ gpio_direction_output(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+
+ return 0;
+}
+
+static int spi_clps711x_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+
+ /* Setup mode for transfer */
+ return regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCKNSEN,
+ (spi->mode & SPI_CPHA) ?
+ SYSCON3_ADCCKNSEN : 0);
+}
+
+static int spi_clps711x_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
+
+ clk_set_rate(hw->spi_clk, xfer->speed_hz ? : spi->max_speed_hz);
+
+ hw->len = xfer->len;
+ hw->bpw = xfer->bits_per_word;
+ hw->tx_buf = (u8 *)xfer->tx_buf;
+ hw->rx_buf = (u8 *)xfer->rx_buf;
+
+ /* Initiate transfer */
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN, hw->syncio);
+
+ return 1;
+}
+
+static irqreturn_t spi_clps711x_isr(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct spi_clps711x_data *hw = spi_master_get_devdata(master);
+ u8 data;
+
+ /* Handle RX */
+ data = readb(hw->syncio);
+ if (hw->rx_buf)
+ *hw->rx_buf++ = data;
+
+ /* Handle TX */
+ if (--hw->len > 0) {
+ data = hw->tx_buf ? *hw->tx_buf++ : 0;
+ writel(data | SYNCIO_FRMLEN(hw->bpw) | SYNCIO_TXFRMEN,
+ hw->syncio);
+	} else {
+		spi_finalize_current_transfer(master);
+	}
+
+ return IRQ_HANDLED;
+}
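+
+/*
+ * A note on the flow as the code above reads: transfer_one writes the
+ * first frame and returns 1, so the SPI core waits for completion; each
+ * SYNCIO interrupt then stores the received byte and either queues the
+ * next frame or, once hw->len reaches zero, finalizes the transfer.
+ */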
+
+static int spi_clps711x_probe(struct platform_device *pdev)
+{
+ struct spi_clps711x_data *hw;
+ struct spi_clps711x_pdata *pdata = dev_get_platdata(&pdev->dev);
+ struct spi_master *master;
+ struct resource *res;
+ int i, irq, ret;
+
+ if (!pdata) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ return -EINVAL;
+ }
+
+ if (pdata->num_chipselect < 1) {
+ dev_err(&pdev->dev, "At least one CS must be defined\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*hw));
+ if (!master)
+ return -ENOMEM;
+
+	master->cs_gpios = devm_kcalloc(&pdev->dev, pdata->num_chipselect,
+					sizeof(int), GFP_KERNEL);
+ if (!master->cs_gpios) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 8);
+ master->num_chipselect = pdata->num_chipselect;
+ master->setup = spi_clps711x_setup;
+ master->prepare_message = spi_clps711x_prepare_message;
+ master->transfer_one = spi_clps711x_transfer_one;
+
+ hw = spi_master_get_devdata(master);
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ master->cs_gpios[i] = pdata->chipselect[i];
+ ret = devm_gpio_request(&pdev->dev, master->cs_gpios[i],
+ DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "Can't get CS GPIO %i\n", i);
+ goto err_out;
+ }
+ }
+
+ hw->spi_clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(hw->spi_clk)) {
+ ret = PTR_ERR(hw->spi_clk);
+ goto err_out;
+ }
+
+ hw->syscon = syscon_regmap_lookup_by_pdevname("syscon.3");
+ if (IS_ERR(hw->syscon)) {
+ ret = PTR_ERR(hw->syscon);
+ goto err_out;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->syncio = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->syncio)) {
+ ret = PTR_ERR(hw->syncio);
+ goto err_out;
+ }
+
+	/* Disable extended mode due to hardware problems */
+ regmap_update_bits(hw->syscon, SYSCON_OFFSET, SYSCON3_ADCCON, 0);
+
+ /* Clear possible pending interrupt */
+ readl(hw->syncio);
+
+ ret = devm_request_irq(&pdev->dev, irq, spi_clps711x_isr, 0,
+ dev_name(&pdev->dev), master);
+ if (ret)
+ goto err_out;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (!ret) {
+ dev_info(&pdev->dev,
+ "SPI bus driver initialized. Master clock %u Hz\n",
+ master->max_speed_hz);
+ return 0;
+ }
+
+ dev_err(&pdev->dev, "Failed to register master\n");
+
+err_out:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static struct platform_driver clps711x_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ },
+ .probe = spi_clps711x_probe,
+};
+module_platform_driver(clps711x_spi_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Alexander Shiyan <shc_work@mail.ru>");
+MODULE_DESCRIPTION("CLPS711X SPI bus driver");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-coldfire-qspi.c b/drivers/spi/spi-coldfire-qspi.c
new file mode 100644
index 000000000..688956ff5
--- /dev/null
+++ b/drivers/spi/spi-coldfire-qspi.c
@@ -0,0 +1,529 @@
+/*
+ * Freescale/Motorola Coldfire Queued SPI driver
+ *
+ * Copyright 2010 Steven King <sfking@fdwdc.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+*/
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+
+#include <asm/coldfire.h>
+#include <asm/mcfsim.h>
+#include <asm/mcfqspi.h>
+
+#define DRIVER_NAME "mcfqspi"
+
+#define MCFQSPI_BUSCLK (MCF_BUSCLK / 2)
+
+#define MCFQSPI_QMR 0x00
+#define MCFQSPI_QMR_MSTR 0x8000
+#define MCFQSPI_QMR_CPOL 0x0200
+#define MCFQSPI_QMR_CPHA 0x0100
+#define MCFQSPI_QDLYR 0x04
+#define MCFQSPI_QDLYR_SPE 0x8000
+#define MCFQSPI_QWR 0x08
+#define MCFQSPI_QWR_HALT 0x8000
+#define MCFQSPI_QWR_WREN 0x4000
+#define MCFQSPI_QWR_CSIV 0x1000
+#define MCFQSPI_QIR 0x0C
+#define MCFQSPI_QIR_WCEFB 0x8000
+#define MCFQSPI_QIR_ABRTB 0x4000
+#define MCFQSPI_QIR_ABRTL 0x1000
+#define MCFQSPI_QIR_WCEFE 0x0800
+#define MCFQSPI_QIR_ABRTE 0x0400
+#define MCFQSPI_QIR_SPIFE 0x0100
+#define MCFQSPI_QIR_WCEF 0x0008
+#define MCFQSPI_QIR_ABRT 0x0004
+#define MCFQSPI_QIR_SPIF 0x0001
+#define MCFQSPI_QAR 0x010
+#define MCFQSPI_QAR_TXBUF 0x00
+#define MCFQSPI_QAR_RXBUF 0x10
+#define MCFQSPI_QAR_CMDBUF 0x20
+#define MCFQSPI_QDR 0x014
+#define MCFQSPI_QCR 0x014
+#define MCFQSPI_QCR_CONT 0x8000
+#define MCFQSPI_QCR_BITSE 0x4000
+#define MCFQSPI_QCR_DT 0x2000
+
+struct mcfqspi {
+ void __iomem *iobase;
+ int irq;
+ struct clk *clk;
+ struct mcfqspi_cs_control *cs_control;
+
+ wait_queue_head_t waitq;
+};
+
+static void mcfqspi_wr_qmr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QMR);
+}
+
+static void mcfqspi_wr_qdlyr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QDLYR);
+}
+
+static u16 mcfqspi_rd_qdlyr(struct mcfqspi *mcfqspi)
+{
+ return readw(mcfqspi->iobase + MCFQSPI_QDLYR);
+}
+
+static void mcfqspi_wr_qwr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QWR);
+}
+
+static void mcfqspi_wr_qir(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QIR);
+}
+
+static void mcfqspi_wr_qar(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QAR);
+}
+
+static void mcfqspi_wr_qdr(struct mcfqspi *mcfqspi, u16 val)
+{
+ writew(val, mcfqspi->iobase + MCFQSPI_QDR);
+}
+
+static u16 mcfqspi_rd_qdr(struct mcfqspi *mcfqspi)
+{
+ return readw(mcfqspi->iobase + MCFQSPI_QDR);
+}
+
+static void mcfqspi_cs_select(struct mcfqspi *mcfqspi, u8 chip_select,
+ bool cs_high)
+{
+ mcfqspi->cs_control->select(mcfqspi->cs_control, chip_select, cs_high);
+}
+
+static void mcfqspi_cs_deselect(struct mcfqspi *mcfqspi, u8 chip_select,
+ bool cs_high)
+{
+ mcfqspi->cs_control->deselect(mcfqspi->cs_control, chip_select, cs_high);
+}
+
+static int mcfqspi_cs_setup(struct mcfqspi *mcfqspi)
+{
+ return (mcfqspi->cs_control->setup) ?
+ mcfqspi->cs_control->setup(mcfqspi->cs_control) : 0;
+}
+
+static void mcfqspi_cs_teardown(struct mcfqspi *mcfqspi)
+{
+ if (mcfqspi->cs_control->teardown)
+ mcfqspi->cs_control->teardown(mcfqspi->cs_control);
+}
+
+static u8 mcfqspi_qmr_baud(u32 speed_hz)
+{
+ return clamp((MCFQSPI_BUSCLK + speed_hz - 1) / speed_hz, 2u, 255u);
+}
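+
+/*
+ * Worked example (the bus clock value is hypothetical): with MCF_BUSCLK
+ * at 80 MHz, MCFQSPI_BUSCLK is 40 MHz, so a requested 10 MHz yields a
+ * BAUD field of (40 MHz + 10 MHz - 1) / 10 MHz = 4 and an SCLK of
+ * 40 MHz / 4 = 10 MHz; requests above 20 MHz clamp to the minimum
+ * divider of 2, requests below ~157 kHz to the maximum of 255.
+ */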
+
+static bool mcfqspi_qdlyr_spe(struct mcfqspi *mcfqspi)
+{
+ return mcfqspi_rd_qdlyr(mcfqspi) & MCFQSPI_QDLYR_SPE;
+}
+
+static irqreturn_t mcfqspi_irq_handler(int this_irq, void *dev_id)
+{
+ struct mcfqspi *mcfqspi = dev_id;
+
+ /* clear interrupt */
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE | MCFQSPI_QIR_SPIF);
+ wake_up(&mcfqspi->waitq);
+
+ return IRQ_HANDLED;
+}
+
+static void mcfqspi_transfer_msg8(struct mcfqspi *mcfqspi, unsigned count,
+ const u8 *txbuf, u8 *rxbuf)
+{
+ unsigned i, n, offset = 0;
+
+ n = min(count, 16u);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
+ if (txbuf)
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ else
+ for (i = 0; i < count; ++i)
+ mcfqspi_wr_qdr(mcfqspi, 0);
+
+ count -= n;
+ if (count) {
+		u16 qwr = 0xf08;
+
+		mcfqspi_wr_qwr(mcfqspi, 0x700);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+
+ do {
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+ n = min(count, 8u);
+ if (txbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_TXBUF + offset);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ }
+ qwr = (offset ? 0x808 : 0) + ((n - 1) << 8);
+ offset ^= 8;
+ count -= n;
+ } while (count);
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ offset ^= 8;
+ }
+ } else {
+ mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ }
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < n; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+}
+
+static void mcfqspi_transfer_msg16(struct mcfqspi *mcfqspi, unsigned count,
+ const u16 *txbuf, u16 *rxbuf)
+{
+ unsigned i, n, offset = 0;
+
+ n = min(count, 16u);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_CMDBUF);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, MCFQSPI_QCR_BITSE);
+
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_TXBUF);
+ if (txbuf)
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ else
+ for (i = 0; i < count; ++i)
+ mcfqspi_wr_qdr(mcfqspi, 0);
+
+ count -= n;
+ if (count) {
+		u16 qwr = 0xf08;
+
+		mcfqspi_wr_qwr(mcfqspi, 0x700);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+
+ do {
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+ n = min(count, 8u);
+ if (txbuf) {
+ mcfqspi_wr_qar(mcfqspi,
+ MCFQSPI_QAR_TXBUF + offset);
+ for (i = 0; i < n; ++i)
+ mcfqspi_wr_qdr(mcfqspi, *txbuf++);
+ }
+ qwr = (offset ? 0x808 : 0x000) + ((n - 1) << 8);
+ offset ^= 8;
+ count -= n;
+ } while (count);
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ mcfqspi_wr_qwr(mcfqspi, qwr);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < 8; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ offset ^= 8;
+ }
+ } else {
+ mcfqspi_wr_qwr(mcfqspi, (n - 1) << 8);
+ mcfqspi_wr_qdlyr(mcfqspi, MCFQSPI_QDLYR_SPE);
+ }
+ wait_event(mcfqspi->waitq, !mcfqspi_qdlyr_spe(mcfqspi));
+ if (rxbuf) {
+ mcfqspi_wr_qar(mcfqspi, MCFQSPI_QAR_RXBUF + offset);
+ for (i = 0; i < n; ++i)
+ *rxbuf++ = mcfqspi_rd_qdr(mcfqspi);
+ }
+}
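+
+/*
+ * Both transfer routines above drive the QSPI's 16-entry command/data RAM
+ * in two 8-entry halves: the first burst runs entries 0-7 (QWR 0x700),
+ * then each pass points the queue at the opposite half (0xf08, then the
+ * 0x808-based values) while 'offset ^= 8' lets the CPU drain RX data from
+ * and refill TX data into the half the hardware is not using -- a
+ * ping-pong scheme that keeps the queue busy for long transfers.
+ */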
+
+static void mcfqspi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(spi->master);
+ bool cs_high = spi->mode & SPI_CS_HIGH;
+
+ if (enable)
+ mcfqspi_cs_select(mcfqspi, spi->chip_select, cs_high);
+ else
+ mcfqspi_cs_deselect(mcfqspi, spi->chip_select, cs_high);
+}
+
+static int mcfqspi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ u16 qmr = MCFQSPI_QMR_MSTR;
+
+ qmr |= t->bits_per_word << 10;
+ if (spi->mode & SPI_CPHA)
+ qmr |= MCFQSPI_QMR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ qmr |= MCFQSPI_QMR_CPOL;
+ qmr |= mcfqspi_qmr_baud(t->speed_hz);
+ mcfqspi_wr_qmr(mcfqspi, qmr);
+
+ mcfqspi_wr_qir(mcfqspi, MCFQSPI_QIR_SPIFE);
+ if (t->bits_per_word == 8)
+ mcfqspi_transfer_msg8(mcfqspi, t->len, t->tx_buf, t->rx_buf);
+ else
+ mcfqspi_transfer_msg16(mcfqspi, t->len / 2, t->tx_buf,
+ t->rx_buf);
+ mcfqspi_wr_qir(mcfqspi, 0);
+
+ return 0;
+}
+
+static int mcfqspi_setup(struct spi_device *spi)
+{
+ mcfqspi_cs_deselect(spi_master_get_devdata(spi->master),
+ spi->chip_select, spi->mode & SPI_CS_HIGH);
+
+ dev_dbg(&spi->dev,
+ "bits per word %d, chip select %d, speed %d KHz\n",
+ spi->bits_per_word, spi->chip_select,
+ (MCFQSPI_BUSCLK / mcfqspi_qmr_baud(spi->max_speed_hz))
+ / 1000);
+
+ return 0;
+}
+
+static int mcfqspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct mcfqspi *mcfqspi;
+ struct resource *res;
+ struct mcfqspi_platform_data *pdata;
+ int status;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (!pdata) {
+ dev_dbg(&pdev->dev, "platform data is missing\n");
+ return -ENOENT;
+ }
+
+ if (!pdata->cs_control) {
+ dev_dbg(&pdev->dev, "pdata->cs_control is NULL\n");
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*mcfqspi));
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "spi_alloc_master failed\n");
+ return -ENOMEM;
+ }
+
+ mcfqspi = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ mcfqspi->iobase = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(mcfqspi->iobase)) {
+ status = PTR_ERR(mcfqspi->iobase);
+ goto fail0;
+ }
+
+ mcfqspi->irq = platform_get_irq(pdev, 0);
+ if (mcfqspi->irq < 0) {
+ dev_dbg(&pdev->dev, "platform_get_irq failed\n");
+ status = -ENXIO;
+ goto fail0;
+ }
+
+ status = devm_request_irq(&pdev->dev, mcfqspi->irq, mcfqspi_irq_handler,
+ 0, pdev->name, mcfqspi);
+ if (status) {
+ dev_dbg(&pdev->dev, "request_irq failed\n");
+ goto fail0;
+ }
+
+ mcfqspi->clk = devm_clk_get(&pdev->dev, "qspi_clk");
+ if (IS_ERR(mcfqspi->clk)) {
+ dev_dbg(&pdev->dev, "clk_get failed\n");
+ status = PTR_ERR(mcfqspi->clk);
+ goto fail0;
+ }
+ clk_enable(mcfqspi->clk);
+
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->num_chipselect;
+
+ mcfqspi->cs_control = pdata->cs_control;
+ status = mcfqspi_cs_setup(mcfqspi);
+ if (status) {
+ dev_dbg(&pdev->dev, "error initializing cs_control\n");
+ goto fail1;
+ }
+
+ init_waitqueue_head(&mcfqspi->waitq);
+
+ master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 16);
+ master->setup = mcfqspi_setup;
+ master->set_cs = mcfqspi_set_cs;
+ master->transfer_one = mcfqspi_transfer_one;
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ status = devm_spi_register_master(&pdev->dev, master);
+ if (status) {
+ dev_dbg(&pdev->dev, "spi_register_master failed\n");
+ goto fail2;
+ }
+ pm_runtime_enable(&pdev->dev);
+
+ dev_info(&pdev->dev, "Coldfire QSPI bus driver\n");
+
+ return 0;
+
+fail2:
+ mcfqspi_cs_teardown(mcfqspi);
+fail1:
+ clk_disable(mcfqspi->clk);
+fail0:
+ spi_master_put(master);
+
+ dev_dbg(&pdev->dev, "Coldfire QSPI probe failed\n");
+
+ return status;
+}
+
+static int mcfqspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+ /* disable the hardware (set the baud rate to 0) */
+ mcfqspi_wr_qmr(mcfqspi, MCFQSPI_QMR_MSTR);
+
+ mcfqspi_cs_teardown(mcfqspi);
+ clk_disable(mcfqspi->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int mcfqspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ clk_disable(mcfqspi->clk);
+
+ return 0;
+}
+
+static int mcfqspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ clk_enable(mcfqspi->clk);
+
+ return spi_master_resume(master);
+}
+#endif
+
+#ifdef CONFIG_PM
+static int mcfqspi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ clk_disable(mcfqspi->clk);
+
+ return 0;
+}
+
+static int mcfqspi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mcfqspi *mcfqspi = spi_master_get_devdata(master);
+
+ clk_enable(mcfqspi->clk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops mcfqspi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(mcfqspi_suspend, mcfqspi_resume)
+ SET_RUNTIME_PM_OPS(mcfqspi_runtime_suspend, mcfqspi_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver mcfqspi_driver = {
+ .driver.name = DRIVER_NAME,
+ .driver.owner = THIS_MODULE,
+ .driver.pm = &mcfqspi_pm,
+ .probe = mcfqspi_probe,
+ .remove = mcfqspi_remove,
+};
+module_platform_driver(mcfqspi_driver);
+
+MODULE_AUTHOR("Steven King <sfking@fdwdc.com>");
+MODULE_DESCRIPTION("Coldfire QSPI Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
new file mode 100644
index 000000000..5e991065f
--- /dev/null
+++ b/drivers/spi/spi-davinci.c
@@ -0,0 +1,1126 @@
+/*
+ * Copyright (C) 2009 Texas Instruments.
+ * Copyright (C) 2010 EF Johnson Technologies
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/edma.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/slab.h>
+
+#include <linux/platform_data/spi-davinci.h>
+
+#define SPI_NO_RESOURCE ((resource_size_t)-1)
+
+#define CS_DEFAULT 0xFF
+
+#define SPIFMT_PHASE_MASK BIT(16)
+#define SPIFMT_POLARITY_MASK BIT(17)
+#define SPIFMT_DISTIMER_MASK BIT(18)
+#define SPIFMT_SHIFTDIR_MASK BIT(20)
+#define SPIFMT_WAITENA_MASK BIT(21)
+#define SPIFMT_PARITYENA_MASK BIT(22)
+#define SPIFMT_ODD_PARITY_MASK BIT(23)
+#define SPIFMT_WDELAY_MASK 0x3f000000u
+#define SPIFMT_WDELAY_SHIFT 24
+#define SPIFMT_PRESCALE_SHIFT 8
+
+/* SPIPC0 */
+#define SPIPC0_DIFUN_MASK BIT(11) /* MISO */
+#define SPIPC0_DOFUN_MASK BIT(10) /* MOSI */
+#define SPIPC0_CLKFUN_MASK BIT(9) /* CLK */
+#define SPIPC0_SPIENA_MASK BIT(8) /* nREADY */
+
+#define SPIINT_MASKALL 0x0101035F
+#define SPIINT_MASKINT 0x0000015F
+#define SPI_INTLVL_1 0x000001FF
+#define SPI_INTLVL_0 0x00000000
+
+/* SPIDAT1 (upper 16 bit defines) */
+#define SPIDAT1_CSHOLD_MASK BIT(12)
+#define SPIDAT1_WDEL BIT(10)
+
+/* SPIGCR1 */
+#define SPIGCR1_CLKMOD_MASK BIT(1)
+#define SPIGCR1_MASTER_MASK BIT(0)
+#define SPIGCR1_POWERDOWN_MASK BIT(8)
+#define SPIGCR1_LOOPBACK_MASK BIT(16)
+#define SPIGCR1_SPIENA_MASK BIT(24)
+
+/* SPIBUF */
+#define SPIBUF_TXFULL_MASK BIT(29)
+#define SPIBUF_RXEMPTY_MASK BIT(31)
+
+/* SPIDELAY */
+#define SPIDELAY_C2TDELAY_SHIFT 24
+#define SPIDELAY_C2TDELAY_MASK (0xFF << SPIDELAY_C2TDELAY_SHIFT)
+#define SPIDELAY_T2CDELAY_SHIFT 16
+#define SPIDELAY_T2CDELAY_MASK (0xFF << SPIDELAY_T2CDELAY_SHIFT)
+#define SPIDELAY_T2EDELAY_SHIFT 8
+#define SPIDELAY_T2EDELAY_MASK (0xFF << SPIDELAY_T2EDELAY_SHIFT)
+#define SPIDELAY_C2EDELAY_SHIFT 0
+#define SPIDELAY_C2EDELAY_MASK 0xFF
+
+/* Error Masks */
+#define SPIFLG_DLEN_ERR_MASK BIT(0)
+#define SPIFLG_TIMEOUT_MASK BIT(1)
+#define SPIFLG_PARERR_MASK BIT(2)
+#define SPIFLG_DESYNC_MASK BIT(3)
+#define SPIFLG_BITERR_MASK BIT(4)
+#define SPIFLG_OVRRUN_MASK BIT(6)
+#define SPIFLG_BUF_INIT_ACTIVE_MASK BIT(24)
+#define SPIFLG_ERROR_MASK (SPIFLG_DLEN_ERR_MASK \
+ | SPIFLG_TIMEOUT_MASK | SPIFLG_PARERR_MASK \
+ | SPIFLG_DESYNC_MASK | SPIFLG_BITERR_MASK \
+ | SPIFLG_OVRRUN_MASK)
+
+#define SPIINT_DMA_REQ_EN BIT(16)
+
+/* SPI Controller registers */
+#define SPIGCR0 0x00
+#define SPIGCR1 0x04
+#define SPIINT 0x08
+#define SPILVL 0x0c
+#define SPIFLG 0x10
+#define SPIPC0 0x14
+#define SPIDAT1 0x3c
+#define SPIBUF 0x40
+#define SPIDELAY 0x48
+#define SPIDEF 0x4c
+#define SPIFMT0 0x50
+
+/* SPI Controller driver's private data. */
+struct davinci_spi {
+ struct spi_bitbang bitbang;
+ struct clk *clk;
+
+ u8 version;
+ resource_size_t pbase;
+ void __iomem *base;
+ u32 irq;
+ struct completion done;
+
+ const void *tx;
+ void *rx;
+ int rcount;
+ int wcount;
+
+ struct dma_chan *dma_rx;
+ struct dma_chan *dma_tx;
+ int dma_rx_chnum;
+ int dma_tx_chnum;
+
+ struct davinci_spi_platform_data pdata;
+
+ void (*get_rx)(u32 rx_data, struct davinci_spi *);
+ u32 (*get_tx)(struct davinci_spi *);
+
+ u8 *bytes_per_word;
+};
+
+static struct davinci_spi_config davinci_spi_default_cfg;
+
+static void davinci_spi_rx_buf_u8(u32 data, struct davinci_spi *dspi)
+{
+ if (dspi->rx) {
+ u8 *rx = dspi->rx;
+ *rx++ = (u8)data;
+ dspi->rx = rx;
+ }
+}
+
+static void davinci_spi_rx_buf_u16(u32 data, struct davinci_spi *dspi)
+{
+ if (dspi->rx) {
+ u16 *rx = dspi->rx;
+ *rx++ = (u16)data;
+ dspi->rx = rx;
+ }
+}
+
+static u32 davinci_spi_tx_buf_u8(struct davinci_spi *dspi)
+{
+ u32 data = 0;
+
+ if (dspi->tx) {
+ const u8 *tx = dspi->tx;
+
+ data = *tx++;
+ dspi->tx = tx;
+ }
+ return data;
+}
+
+static u32 davinci_spi_tx_buf_u16(struct davinci_spi *dspi)
+{
+ u32 data = 0;
+
+ if (dspi->tx) {
+ const u16 *tx = dspi->tx;
+
+ data = *tx++;
+ dspi->tx = tx;
+ }
+ return data;
+}
+
+static inline void set_io_bits(void __iomem *addr, u32 bits)
+{
+ u32 v = ioread32(addr);
+
+ v |= bits;
+ iowrite32(v, addr);
+}
+
+static inline void clear_io_bits(void __iomem *addr, u32 bits)
+{
+ u32 v = ioread32(addr);
+
+ v &= ~bits;
+ iowrite32(v, addr);
+}
+
+/*
+ * Interface to control the chip select signal
+ */
+static void davinci_spi_chipselect(struct spi_device *spi, int value)
+{
+ struct davinci_spi *dspi;
+ struct davinci_spi_platform_data *pdata;
+ struct davinci_spi_config *spicfg = spi->controller_data;
+ u8 chip_sel = spi->chip_select;
+ u16 spidat1 = CS_DEFAULT;
+ bool gpio_chipsel = false;
+ int gpio;
+
+ dspi = spi_master_get_devdata(spi->master);
+ pdata = &dspi->pdata;
+
+ if (spi->cs_gpio >= 0) {
+		/* the SPI core has parsed master->cs_gpios and set spi->cs_gpio */
+ gpio_chipsel = true;
+ gpio = spi->cs_gpio;
+ }
+
+	/* program the inter-word delay if wdelay is non-zero */
+ if (spicfg->wdelay)
+ spidat1 |= SPIDAT1_WDEL;
+
+ /*
+ * Board specific chip select logic decides the polarity and cs
+ * line for the controller
+ */
+ if (gpio_chipsel) {
+ if (value == BITBANG_CS_ACTIVE)
+ gpio_set_value(gpio, spi->mode & SPI_CS_HIGH);
+ else
+ gpio_set_value(gpio, !(spi->mode & SPI_CS_HIGH));
+ } else {
+ if (value == BITBANG_CS_ACTIVE) {
+ spidat1 |= SPIDAT1_CSHOLD_MASK;
+ spidat1 &= ~(0x1 << chip_sel);
+ }
+ }
+
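+	/*
+	 * Only the upper half of SPIDAT1 (CSHOLD and the chip select bits) is
+	 * written here; writing 16 bits at offset +2 avoids touching the
+	 * TXDATA half, since a write to that half would clock out a spurious
+	 * word.
+	 */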
+ iowrite16(spidat1, dspi->base + SPIDAT1 + 2);
+}
+
+/**
+ * davinci_spi_get_prescale - Calculates the correct prescale value
+ * @dspi: the controller data
+ * @max_speed_hz: the maximum rate the SPI clock can run at
+ *
+ * This function calculates the prescale value that generates a clock rate
+ * less than or equal to the specified maximum.
+ *
+ * Returns: calculated prescale value - 1 for easy programming into SPI
+ * registers, or a negative error number if no valid prescaler exists.
+ */
+static inline int davinci_spi_get_prescale(struct davinci_spi *dspi,
+ u32 max_speed_hz)
+{
+ int ret;
+
+ ret = DIV_ROUND_UP(clk_get_rate(dspi->clk), max_speed_hz);
+
+ if (ret < 3 || ret > 256)
+ return -EINVAL;
+
+ return ret - 1;
+}
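+
+/*
+ * Example with a hypothetical 150 MHz module clock: a max_speed_hz of
+ * 10 MHz gives DIV_ROUND_UP(150 MHz, 10 MHz) = 15, so 14 is returned for
+ * programming and the resulting SPI clock is 150 MHz / 15 = 10 MHz.
+ * Requests that would need a divider below 3 or above 256 are rejected.
+ */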
+
+/**
+ * davinci_spi_setup_transfer - This function determines the transfer method
+ * @spi: spi device on which data transfer is to be done
+ * @t: spi transfer in which transfer info is filled
+ *
+ * This function determines the data transfer method (8-bit or 16-bit
+ * transfer). It also sets the SPI Clock Control register according to the
+ * SPI slave device frequency.
+ */
+static int davinci_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+
+ struct davinci_spi *dspi;
+ struct davinci_spi_config *spicfg;
+ u8 bits_per_word = 0;
+ u32 hz = 0, spifmt = 0;
+ int prescale;
+
+ dspi = spi_master_get_devdata(spi->master);
+ spicfg = spi->controller_data;
+ if (!spicfg)
+ spicfg = &davinci_spi_default_cfg;
+
+ if (t) {
+ bits_per_word = t->bits_per_word;
+ hz = t->speed_hz;
+ }
+
+	/* if bits_per_word is not set then use the device default */
+ if (!bits_per_word)
+ bits_per_word = spi->bits_per_word;
+
+ /*
+	 * Assign the function pointers for the appropriate transfer method:
+	 * 8-bit or 16-bit transfer
+ */
+ if (bits_per_word <= 8) {
+ dspi->get_rx = davinci_spi_rx_buf_u8;
+ dspi->get_tx = davinci_spi_tx_buf_u8;
+ dspi->bytes_per_word[spi->chip_select] = 1;
+ } else {
+ dspi->get_rx = davinci_spi_rx_buf_u16;
+ dspi->get_tx = davinci_spi_tx_buf_u16;
+ dspi->bytes_per_word[spi->chip_select] = 2;
+ }
+
+ if (!hz)
+ hz = spi->max_speed_hz;
+
+ /* Set up SPIFMTn register, unique to this chipselect. */
+
+ prescale = davinci_spi_get_prescale(dspi, hz);
+ if (prescale < 0)
+ return prescale;
+
+ spifmt = (prescale << SPIFMT_PRESCALE_SHIFT) | (bits_per_word & 0x1f);
+
+ if (spi->mode & SPI_LSB_FIRST)
+ spifmt |= SPIFMT_SHIFTDIR_MASK;
+
+ if (spi->mode & SPI_CPOL)
+ spifmt |= SPIFMT_POLARITY_MASK;
+
+ if (!(spi->mode & SPI_CPHA))
+ spifmt |= SPIFMT_PHASE_MASK;
+
+ /*
+	 * Assume wdelay is used only on SPI peripherals that have this field
+	 * in the SPIFMTn register and when it's configured from a board file
+	 * or DT.
+ */
+ if (spicfg->wdelay)
+ spifmt |= ((spicfg->wdelay << SPIFMT_WDELAY_SHIFT)
+ & SPIFMT_WDELAY_MASK);
+
+ /*
+ * Version 1 hardware supports two basic SPI modes:
+ * - Standard SPI mode uses 4 pins, with chipselect
+ * - 3 pin SPI is a 4 pin variant without CS (SPI_NO_CS)
+ * (distinct from SPI_3WIRE, with just one data wire;
+ * or similar variants without MOSI or without MISO)
+ *
+ * Version 2 hardware supports an optional handshaking signal,
+ * so it can support two more modes:
+ * - 5 pin SPI variant is standard SPI plus SPI_READY
+ * - 4 pin with enable is (SPI_READY | SPI_NO_CS)
+ */
+
+ if (dspi->version == SPI_VERSION_2) {
+
+ u32 delay = 0;
+
+ if (spicfg->odd_parity)
+ spifmt |= SPIFMT_ODD_PARITY_MASK;
+
+ if (spicfg->parity_enable)
+ spifmt |= SPIFMT_PARITYENA_MASK;
+
+ if (spicfg->timer_disable) {
+ spifmt |= SPIFMT_DISTIMER_MASK;
+ } else {
+ delay |= (spicfg->c2tdelay << SPIDELAY_C2TDELAY_SHIFT)
+ & SPIDELAY_C2TDELAY_MASK;
+ delay |= (spicfg->t2cdelay << SPIDELAY_T2CDELAY_SHIFT)
+ & SPIDELAY_T2CDELAY_MASK;
+ }
+
+ if (spi->mode & SPI_READY) {
+ spifmt |= SPIFMT_WAITENA_MASK;
+ delay |= (spicfg->t2edelay << SPIDELAY_T2EDELAY_SHIFT)
+ & SPIDELAY_T2EDELAY_MASK;
+ delay |= (spicfg->c2edelay << SPIDELAY_C2EDELAY_SHIFT)
+ & SPIDELAY_C2EDELAY_MASK;
+ }
+
+ iowrite32(delay, dspi->base + SPIDELAY);
+ }
+
+ iowrite32(spifmt, dspi->base + SPIFMT0);
+
+ return 0;
+}
+
+static int davinci_spi_of_setup(struct spi_device *spi)
+{
+ struct davinci_spi_config *spicfg = spi->controller_data;
+ struct device_node *np = spi->dev.of_node;
+ u32 prop;
+
+ if (spicfg == NULL && np) {
+ spicfg = kzalloc(sizeof(*spicfg), GFP_KERNEL);
+ if (!spicfg)
+ return -ENOMEM;
+ *spicfg = davinci_spi_default_cfg;
+ /* override with dt configured values */
+ if (!of_property_read_u32(np, "ti,spi-wdelay", &prop))
+ spicfg->wdelay = (u8)prop;
+ spi->controller_data = spicfg;
+ }
+
+ return 0;
+}
+
+/**
+ * davinci_spi_setup - This function sets the default transfer method
+ * @spi: spi device on which data transfer is to be done
+ *
+ * This function sets the default transfer method.
+ */
+static int davinci_spi_setup(struct spi_device *spi)
+{
+ int retval = 0;
+ struct davinci_spi *dspi;
+ struct davinci_spi_platform_data *pdata;
+ struct spi_master *master = spi->master;
+ struct device_node *np = spi->dev.of_node;
+ bool internal_cs = true;
+
+ dspi = spi_master_get_devdata(spi->master);
+ pdata = &dspi->pdata;
+
+ if (!(spi->mode & SPI_NO_CS)) {
+ if (np && (master->cs_gpios != NULL) && (spi->cs_gpio >= 0)) {
+ retval = gpio_direction_output(
+ spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ internal_cs = false;
+ } else if (pdata->chip_sel &&
+ spi->chip_select < pdata->num_chipselect &&
+ pdata->chip_sel[spi->chip_select] != SPI_INTERN_CS) {
+ spi->cs_gpio = pdata->chip_sel[spi->chip_select];
+ retval = gpio_direction_output(
+ spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+ internal_cs = false;
+ }
+
+ if (retval) {
+ dev_err(&spi->dev, "GPIO %d setup failed (%d)\n",
+ spi->cs_gpio, retval);
+ return retval;
+ }
+
+ if (internal_cs)
+ set_io_bits(dspi->base + SPIPC0, 1 << spi->chip_select);
+ }
+
+ if (spi->mode & SPI_READY)
+ set_io_bits(dspi->base + SPIPC0, SPIPC0_SPIENA_MASK);
+
+ if (spi->mode & SPI_LOOP)
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
+ else
+ clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_LOOPBACK_MASK);
+
+ return davinci_spi_of_setup(spi);
+}
+
+static void davinci_spi_cleanup(struct spi_device *spi)
+{
+ struct davinci_spi_config *spicfg = spi->controller_data;
+
+ spi->controller_data = NULL;
+ if (spi->dev.of_node)
+ kfree(spicfg);
+}
+
+static int davinci_spi_check_error(struct davinci_spi *dspi, int int_status)
+{
+ struct device *sdev = dspi->bitbang.master->dev.parent;
+
+ if (int_status & SPIFLG_TIMEOUT_MASK) {
+ dev_dbg(sdev, "SPI Time-out Error\n");
+ return -ETIMEDOUT;
+ }
+ if (int_status & SPIFLG_DESYNC_MASK) {
+ dev_dbg(sdev, "SPI Desynchronization Error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_BITERR_MASK) {
+ dev_dbg(sdev, "SPI Bit error\n");
+ return -EIO;
+ }
+
+ if (dspi->version == SPI_VERSION_2) {
+ if (int_status & SPIFLG_DLEN_ERR_MASK) {
+ dev_dbg(sdev, "SPI Data Length Error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_PARERR_MASK) {
+ dev_dbg(sdev, "SPI Parity Error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_OVRRUN_MASK) {
+ dev_dbg(sdev, "SPI Data Overrun error\n");
+ return -EIO;
+ }
+ if (int_status & SPIFLG_BUF_INIT_ACTIVE_MASK) {
+ dev_dbg(sdev, "SPI Buffer Init Active\n");
+ return -EBUSY;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * davinci_spi_process_events - check for and handle any SPI controller events
+ * @dspi: the controller data
+ *
+ * This function will check the SPIFLG register and handle any events that are
+ * detected there
+ */
+static int davinci_spi_process_events(struct davinci_spi *dspi)
+{
+ u32 buf, status, errors = 0, spidat1;
+
+ buf = ioread32(dspi->base + SPIBUF);
+
+ if (dspi->rcount > 0 && !(buf & SPIBUF_RXEMPTY_MASK)) {
+ dspi->get_rx(buf & 0xFFFF, dspi);
+ dspi->rcount--;
+ }
+
+ status = ioread32(dspi->base + SPIFLG);
+
+ if (unlikely(status & SPIFLG_ERROR_MASK)) {
+ errors = status & SPIFLG_ERROR_MASK;
+ goto out;
+ }
+
+ if (dspi->wcount > 0 && !(buf & SPIBUF_TXFULL_MASK)) {
+ spidat1 = ioread32(dspi->base + SPIDAT1);
+ dspi->wcount--;
+ spidat1 &= ~0xFFFF;
+ spidat1 |= 0xFFFF & dspi->get_tx(dspi);
+ iowrite32(spidat1, dspi->base + SPIDAT1);
+ }
+
+out:
+ return errors;
+}
+
+static void davinci_spi_dma_rx_callback(void *data)
+{
+ struct davinci_spi *dspi = (struct davinci_spi *)data;
+
+ dspi->rcount = 0;
+
+ if (!dspi->wcount && !dspi->rcount)
+ complete(&dspi->done);
+}
+
+static void davinci_spi_dma_tx_callback(void *data)
+{
+ struct davinci_spi *dspi = (struct davinci_spi *)data;
+
+ dspi->wcount = 0;
+
+ if (!dspi->wcount && !dspi->rcount)
+ complete(&dspi->done);
+}
+
+/**
+ * davinci_spi_bufs - function which handles the data transfer
+ * @spi: spi device on which data transfer is to be done
+ * @t: spi transfer in which transfer info is filled
+ *
+ * This function puts the data to be transferred into the data register of
+ * the SPI controller and then waits until the completion is signalled by
+ * the IRQ handler.
+ */
+static int davinci_spi_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct davinci_spi *dspi;
+ int data_type, ret = -ENOMEM;
+ u32 tx_data, spidat1;
+ u32 errors = 0;
+ struct davinci_spi_config *spicfg;
+ struct davinci_spi_platform_data *pdata;
+ void *dummy_buf = NULL;
+ struct scatterlist sg_rx, sg_tx;
+
+ dspi = spi_master_get_devdata(spi->master);
+ pdata = &dspi->pdata;
+ spicfg = (struct davinci_spi_config *)spi->controller_data;
+ if (!spicfg)
+ spicfg = &davinci_spi_default_cfg;
+
+ /* convert len to words based on bits_per_word */
+ data_type = dspi->bytes_per_word[spi->chip_select];
+
+ dspi->tx = t->tx_buf;
+ dspi->rx = t->rx_buf;
+ dspi->wcount = t->len / data_type;
+ dspi->rcount = dspi->wcount;
+
+ spidat1 = ioread32(dspi->base + SPIDAT1);
+
+ clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+
+ reinit_completion(&dspi->done);
+
+ if (spicfg->io_type == SPI_IO_TYPE_INTR)
+ set_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
+
+ if (spicfg->io_type != SPI_IO_TYPE_DMA) {
+ /* start the transfer */
+ dspi->wcount--;
+ tx_data = dspi->get_tx(dspi);
+ spidat1 &= 0xFFFF0000;
+ spidat1 |= tx_data & 0xFFFF;
+ iowrite32(spidat1, dspi->base + SPIDAT1);
+ } else {
+ struct dma_slave_config dma_rx_conf = {
+ .direction = DMA_DEV_TO_MEM,
+ .src_addr = (unsigned long)dspi->pbase + SPIBUF,
+ .src_addr_width = data_type,
+ .src_maxburst = 1,
+ };
+ struct dma_slave_config dma_tx_conf = {
+ .direction = DMA_MEM_TO_DEV,
+ .dst_addr = (unsigned long)dspi->pbase + SPIDAT1,
+ .dst_addr_width = data_type,
+ .dst_maxburst = 1,
+ };
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+ void *buf;
+
+ dummy_buf = kzalloc(t->len, GFP_KERNEL);
+ if (!dummy_buf)
+ goto err_alloc_dummy_buf;
+
+ dmaengine_slave_config(dspi->dma_rx, &dma_rx_conf);
+ dmaengine_slave_config(dspi->dma_tx, &dma_tx_conf);
+
+ sg_init_table(&sg_rx, 1);
+ if (!t->rx_buf)
+ buf = dummy_buf;
+ else
+ buf = t->rx_buf;
+ t->rx_dma = dma_map_single(&spi->dev, buf,
+ t->len, DMA_FROM_DEVICE);
+ if (!t->rx_dma) {
+ ret = -EFAULT;
+ goto err_rx_map;
+ }
+ sg_dma_address(&sg_rx) = t->rx_dma;
+ sg_dma_len(&sg_rx) = t->len;
+
+ sg_init_table(&sg_tx, 1);
+ if (!t->tx_buf)
+ buf = dummy_buf;
+ else
+ buf = (void *)t->tx_buf;
+ t->tx_dma = dma_map_single(&spi->dev, buf,
+ t->len, DMA_TO_DEVICE);
+ if (!t->tx_dma) {
+ ret = -EFAULT;
+ goto err_tx_map;
+ }
+ sg_dma_address(&sg_tx) = t->tx_dma;
+ sg_dma_len(&sg_tx) = t->len;
+
+ rxdesc = dmaengine_prep_slave_sg(dspi->dma_rx,
+ &sg_rx, 1, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto err_desc;
+
+ txdesc = dmaengine_prep_slave_sg(dspi->dma_tx,
+ &sg_tx, 1, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ goto err_desc;
+
+ rxdesc->callback = davinci_spi_dma_rx_callback;
+ rxdesc->callback_param = (void *)dspi;
+ txdesc->callback = davinci_spi_dma_tx_callback;
+ txdesc->callback_param = (void *)dspi;
+
+ if (pdata->cshold_bug)
+ iowrite16(spidat1 >> 16, dspi->base + SPIDAT1 + 2);
+
+ dmaengine_submit(rxdesc);
+ dmaengine_submit(txdesc);
+
+ dma_async_issue_pending(dspi->dma_rx);
+ dma_async_issue_pending(dspi->dma_tx);
+
+ set_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
+ }
+
+ /* Wait for the transfer to complete */
+ if (spicfg->io_type != SPI_IO_TYPE_POLL) {
+ wait_for_completion_interruptible(&(dspi->done));
+ } else {
+ while (dspi->rcount > 0 || dspi->wcount > 0) {
+ errors = davinci_spi_process_events(dspi);
+ if (errors)
+ break;
+ cpu_relax();
+ }
+ }
+
+ clear_io_bits(dspi->base + SPIINT, SPIINT_MASKALL);
+ if (spicfg->io_type == SPI_IO_TYPE_DMA) {
+ clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
+
+ dma_unmap_single(&spi->dev, t->rx_dma,
+ t->len, DMA_FROM_DEVICE);
+ dma_unmap_single(&spi->dev, t->tx_dma,
+ t->len, DMA_TO_DEVICE);
+ kfree(dummy_buf);
+ }
+
+ clear_io_bits(dspi->base + SPIGCR1, SPIGCR1_SPIENA_MASK);
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+
+ /*
+	 * Check for bit error, desync error, parity error, timeout error and
+ * receive overflow errors
+ */
+ if (errors) {
+ ret = davinci_spi_check_error(dspi, errors);
+ WARN(!ret, "%s: error reported but no error found!\n",
+ dev_name(&spi->dev));
+ return ret;
+ }
+
+ if (dspi->rcount != 0 || dspi->wcount != 0) {
+ dev_err(&spi->dev, "SPI data transfer error\n");
+ return -EIO;
+ }
+
+ return t->len;
+
+err_desc:
+ dma_unmap_single(&spi->dev, t->tx_dma, t->len, DMA_TO_DEVICE);
+err_tx_map:
+ dma_unmap_single(&spi->dev, t->rx_dma, t->len, DMA_FROM_DEVICE);
+err_rx_map:
+ kfree(dummy_buf);
+err_alloc_dummy_buf:
+ return ret;
+}
+
+/**
+ * dummy_thread_fn - dummy thread function
+ * @irq: IRQ number for this SPI Master
+ * @context_data: structure for SPI Master controller davinci_spi
+ *
+ * This is to satisfy the request_threaded_irq() API so that the irq
+ * handler is called in interrupt context.
+ */
+static irqreturn_t dummy_thread_fn(s32 irq, void *data)
+{
+ return IRQ_HANDLED;
+}
+
+/**
+ * davinci_spi_irq - Interrupt handler for SPI Master Controller
+ * @irq: IRQ number for this SPI Master
+ * @data: structure for SPI Master controller davinci_spi
+ *
+ * The ISR determines whether the interrupt arrived for a READ or a WRITE
+ * command and acts accordingly. It checks the transfer length: if it is
+ * non-zero it dispatches the transfer command again, and if it is zero it
+ * signals COMPLETION so that davinci_spi_bufs() can proceed.
+ */
+static irqreturn_t davinci_spi_irq(s32 irq, void *data)
+{
+ struct davinci_spi *dspi = data;
+ int status;
+
+ status = davinci_spi_process_events(dspi);
+ if (unlikely(status != 0))
+ clear_io_bits(dspi->base + SPIINT, SPIINT_MASKINT);
+
+ if ((!dspi->rcount && !dspi->wcount) || status)
+ complete(&dspi->done);
+
+ return IRQ_HANDLED;
+}
+
+static int davinci_spi_request_dma(struct davinci_spi *dspi)
+{
+ dma_cap_mask_t mask;
+ struct device *sdev = dspi->bitbang.master->dev.parent;
+ int r;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ dspi->dma_rx = dma_request_channel(mask, edma_filter_fn,
+ &dspi->dma_rx_chnum);
+ if (!dspi->dma_rx) {
+ dev_err(sdev, "request RX DMA channel failed\n");
+ r = -ENODEV;
+ goto rx_dma_failed;
+ }
+
+ dspi->dma_tx = dma_request_channel(mask, edma_filter_fn,
+ &dspi->dma_tx_chnum);
+ if (!dspi->dma_tx) {
+ dev_err(sdev, "request TX DMA channel failed\n");
+ r = -ENODEV;
+ goto tx_dma_failed;
+ }
+
+ return 0;
+
+tx_dma_failed:
+ dma_release_channel(dspi->dma_rx);
+rx_dma_failed:
+ return r;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id davinci_spi_of_match[] = {
+ {
+ .compatible = "ti,dm6441-spi",
+ },
+ {
+ .compatible = "ti,da830-spi",
+ .data = (void *)SPI_VERSION_2,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, davinci_spi_of_match);
+
+/**
+ * spi_davinci_get_pdata - Get platform data from DTS binding
+ * @pdev: ptr to the platform device
+ * @dspi: ptr to driver data
+ *
+ * Parses and populates pdata in dspi from device tree bindings.
+ *
+ * NOTE: Not all platform data params are supported currently.
+ */
+static int spi_davinci_get_pdata(struct platform_device *pdev,
+ struct davinci_spi *dspi)
+{
+ struct device_node *node = pdev->dev.of_node;
+ struct davinci_spi_platform_data *pdata;
+ unsigned int num_cs, intr_line = 0;
+ const struct of_device_id *match;
+
+ pdata = &dspi->pdata;
+
+ pdata->version = SPI_VERSION_1;
+ match = of_match_device(davinci_spi_of_match, &pdev->dev);
+ if (!match)
+ return -ENODEV;
+
+ /* match data has the SPI version number for SPI_VERSION_2 */
+ if (match->data == (void *)SPI_VERSION_2)
+ pdata->version = SPI_VERSION_2;
+
+ /*
+ * default num_cs is 1 and all chipselects are internal to the chip,
+ * indicated by chip_sel being NULL or cs_gpios being NULL or set to
+ * -ENOENT. num-cs includes internal as well as GPIO chip selects.
+ */
+ num_cs = 1;
+ of_property_read_u32(node, "num-cs", &num_cs);
+ pdata->num_chipselect = num_cs;
+ of_property_read_u32(node, "ti,davinci-spi-intr-line", &intr_line);
+ pdata->intr_line = intr_line;
+ return 0;
+}
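+
+/*
+ * An illustrative DT fragment for the properties parsed above (node name,
+ * unit address and values are made-up examples, not from a real board file):
+ *
+ *	spi1: spi@1f0e000 {
+ *		compatible = "ti,da830-spi";
+ *		num-cs = <4>;
+ *		ti,davinci-spi-intr-line = <1>;
+ *	};
+ */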
+#else
+static int spi_davinci_get_pdata(struct platform_device *pdev,
+ struct davinci_spi *dspi)
+{
+ return -ENODEV;
+}
+#endif
+
+/**
+ * davinci_spi_probe - probe function for SPI Master Controller
+ * @pdev: platform_device structure which contains platform specific data
+ *
+ * According to the Linux Device Model this function will be invoked by Linux
+ * with a platform_device struct which contains the device specific info.
+ * It maps the SPI controller's memory, registers the IRQ, resets the SPI
+ * controller and sets its registers to default values.
+ * It then invokes spi_bitbang_start to create a work queue so that client
+ * drivers can register their transfer method with it.
+ */
+static int davinci_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct davinci_spi *dspi;
+ struct davinci_spi_platform_data *pdata;
+ struct resource *r;
+ resource_size_t dma_rx_chan = SPI_NO_RESOURCE;
+ resource_size_t dma_tx_chan = SPI_NO_RESOURCE;
+ int ret = 0;
+ u32 spipc0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct davinci_spi));
+ if (master == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, master);
+
+ dspi = spi_master_get_devdata(master);
+
+ if (dev_get_platdata(&pdev->dev)) {
+ pdata = dev_get_platdata(&pdev->dev);
+ dspi->pdata = *pdata;
+ } else {
+ /* update dspi pdata with that from the DT */
+ ret = spi_davinci_get_pdata(pdev, dspi);
+ if (ret < 0)
+ goto free_master;
+ }
+
+ /* pdata in dspi is now updated; point pdata at it */
+ pdata = &dspi->pdata;
+
+ dspi->bytes_per_word = devm_kzalloc(&pdev->dev,
+ sizeof(*dspi->bytes_per_word) *
+ pdata->num_chipselect, GFP_KERNEL);
+ if (dspi->bytes_per_word == NULL) {
+ ret = -ENOMEM;
+ goto free_master;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENOENT;
+ goto free_master;
+ }
+
+ dspi->pbase = r->start;
+
+ dspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(dspi->base)) {
+ ret = PTR_ERR(dspi->base);
+ goto free_master;
+ }
+
+ dspi->irq = platform_get_irq(pdev, 0);
+ if (dspi->irq <= 0) {
+ ret = -EINVAL;
+ goto free_master;
+ }
+
+ ret = devm_request_threaded_irq(&pdev->dev, dspi->irq, davinci_spi_irq,
+ dummy_thread_fn, 0, dev_name(&pdev->dev), dspi);
+ if (ret)
+ goto free_master;
+
+ dspi->bitbang.master = master;
+
+ dspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dspi->clk)) {
+ ret = -ENODEV;
+ goto free_master;
+ }
+ clk_prepare_enable(dspi->clk);
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = pdev->id;
+ master->num_chipselect = pdata->num_chipselect;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(2, 16);
+ master->setup = davinci_spi_setup;
+ master->cleanup = davinci_spi_cleanup;
+
+ dspi->bitbang.chipselect = davinci_spi_chipselect;
+ dspi->bitbang.setup_transfer = davinci_spi_setup_transfer;
+
+ dspi->version = pdata->version;
+
+ dspi->bitbang.flags = SPI_NO_CS | SPI_LSB_FIRST | SPI_LOOP;
+ if (dspi->version == SPI_VERSION_2)
+ dspi->bitbang.flags |= SPI_READY;
+
+ if (pdev->dev.of_node) {
+ int i;
+
+ for (i = 0; i < pdata->num_chipselect; i++) {
+ int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
+ "cs-gpios", i);
+
+ if (cs_gpio == -EPROBE_DEFER) {
+ ret = cs_gpio;
+ goto free_clk;
+ }
+
+ if (gpio_is_valid(cs_gpio)) {
+ ret = devm_gpio_request(&pdev->dev, cs_gpio,
+ dev_name(&pdev->dev));
+ if (ret)
+ goto free_clk;
+ }
+ }
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (r)
+ dma_rx_chan = r->start;
+ r = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (r)
+ dma_tx_chan = r->start;
+
+ dspi->bitbang.txrx_bufs = davinci_spi_bufs;
+ if (dma_rx_chan != SPI_NO_RESOURCE &&
+ dma_tx_chan != SPI_NO_RESOURCE) {
+ dspi->dma_rx_chnum = dma_rx_chan;
+ dspi->dma_tx_chnum = dma_tx_chan;
+
+ ret = davinci_spi_request_dma(dspi);
+ if (ret)
+ goto free_clk;
+
+ dev_info(&pdev->dev, "DMA: supported\n");
+ dev_info(&pdev->dev, "DMA: RX channel: %pa, TX channel: %pa, event queue: %d\n",
+ &dma_rx_chan, &dma_tx_chan,
+ pdata->dma_event_q);
+ }
+
+ dspi->get_rx = davinci_spi_rx_buf_u8;
+ dspi->get_tx = davinci_spi_tx_buf_u8;
+
+ init_completion(&dspi->done);
+
+ /* Reset In/OUT SPI module */
+ iowrite32(0, dspi->base + SPIGCR0);
+ udelay(100);
+ iowrite32(1, dspi->base + SPIGCR0);
+
+ /* Set up SPIPC0. CS and ENA init is done in davinci_spi_setup */
+ spipc0 = SPIPC0_DIFUN_MASK | SPIPC0_DOFUN_MASK | SPIPC0_CLKFUN_MASK;
+ iowrite32(spipc0, dspi->base + SPIPC0);
+
+ if (pdata->intr_line)
+ iowrite32(SPI_INTLVL_1, dspi->base + SPILVL);
+ else
+ iowrite32(SPI_INTLVL_0, dspi->base + SPILVL);
+
+ iowrite32(CS_DEFAULT, dspi->base + SPIDEF);
+
+ /* master mode default */
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_CLKMOD_MASK);
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_MASTER_MASK);
+ set_io_bits(dspi->base + SPIGCR1, SPIGCR1_POWERDOWN_MASK);
+
+ ret = spi_bitbang_start(&dspi->bitbang);
+ if (ret)
+ goto free_dma;
+
+ dev_info(&pdev->dev, "Controller at 0x%p\n", dspi->base);
+
+ return ret;
+
+free_dma:
+ if (dspi->dma_rx) {
+ dma_release_channel(dspi->dma_rx);
+ dma_release_channel(dspi->dma_tx);
+ }
+free_clk:
+ clk_disable_unprepare(dspi->clk);
+free_master:
+ spi_master_put(master);
+err:
+ return ret;
+}
+
+/**
+ * davinci_spi_remove - remove function for SPI Master Controller
+ * @pdev: platform_device structure which contains platform specific data
+ *
+ * This function reverses the actions of davinci_spi_probe: it frees the IRQ
+ * and the SPI controller's memory region, and calls spi_bitbang_stop to
+ * destroy the work queue which was created by spi_bitbang_start.
+ */
+static int davinci_spi_remove(struct platform_device *pdev)
+{
+ struct davinci_spi *dspi;
+ struct spi_master *master;
+
+ master = platform_get_drvdata(pdev);
+ dspi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&dspi->bitbang);
+
+ clk_disable_unprepare(dspi->clk);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static struct platform_driver davinci_spi_driver = {
+ .driver = {
+ .name = "spi_davinci",
+ .of_match_table = of_match_ptr(davinci_spi_of_match),
+ },
+ .probe = davinci_spi_probe,
+ .remove = davinci_spi_remove,
+};
+module_platform_driver(davinci_spi_driver);
+
+MODULE_DESCRIPTION("TI DaVinci SPI Master Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-dln2.c b/drivers/spi/spi-dln2.c
new file mode 100644
index 000000000..3b7d91d94
--- /dev/null
+++ b/drivers/spi/spi-dln2.c
@@ -0,0 +1,881 @@
+/*
+ * Driver for the Diolan DLN-2 USB-SPI adapter
+ *
+ * Copyright (c) 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/mfd/dln2.h>
+#include <linux/spi/spi.h>
+#include <linux/pm_runtime.h>
+#include <asm/unaligned.h>
+
+#define DLN2_SPI_MODULE_ID 0x02
+#define DLN2_SPI_CMD(cmd) DLN2_CMD(cmd, DLN2_SPI_MODULE_ID)
+
+/* SPI commands */
+#define DLN2_SPI_GET_PORT_COUNT DLN2_SPI_CMD(0x00)
+#define DLN2_SPI_ENABLE DLN2_SPI_CMD(0x11)
+#define DLN2_SPI_DISABLE DLN2_SPI_CMD(0x12)
+#define DLN2_SPI_IS_ENABLED DLN2_SPI_CMD(0x13)
+#define DLN2_SPI_SET_MODE DLN2_SPI_CMD(0x14)
+#define DLN2_SPI_GET_MODE DLN2_SPI_CMD(0x15)
+#define DLN2_SPI_SET_FRAME_SIZE DLN2_SPI_CMD(0x16)
+#define DLN2_SPI_GET_FRAME_SIZE DLN2_SPI_CMD(0x17)
+#define DLN2_SPI_SET_FREQUENCY DLN2_SPI_CMD(0x18)
+#define DLN2_SPI_GET_FREQUENCY DLN2_SPI_CMD(0x19)
+#define DLN2_SPI_READ_WRITE DLN2_SPI_CMD(0x1A)
+#define DLN2_SPI_READ DLN2_SPI_CMD(0x1B)
+#define DLN2_SPI_WRITE DLN2_SPI_CMD(0x1C)
+#define DLN2_SPI_SET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x20)
+#define DLN2_SPI_GET_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x21)
+#define DLN2_SPI_SET_DELAY_AFTER_SS DLN2_SPI_CMD(0x22)
+#define DLN2_SPI_GET_DELAY_AFTER_SS DLN2_SPI_CMD(0x23)
+#define DLN2_SPI_SET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x24)
+#define DLN2_SPI_GET_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x25)
+#define DLN2_SPI_SET_SS DLN2_SPI_CMD(0x26)
+#define DLN2_SPI_GET_SS DLN2_SPI_CMD(0x27)
+#define DLN2_SPI_RELEASE_SS DLN2_SPI_CMD(0x28)
+#define DLN2_SPI_SS_VARIABLE_ENABLE DLN2_SPI_CMD(0x2B)
+#define DLN2_SPI_SS_VARIABLE_DISABLE DLN2_SPI_CMD(0x2C)
+#define DLN2_SPI_SS_VARIABLE_IS_ENABLED DLN2_SPI_CMD(0x2D)
+#define DLN2_SPI_SS_AAT_ENABLE DLN2_SPI_CMD(0x2E)
+#define DLN2_SPI_SS_AAT_DISABLE DLN2_SPI_CMD(0x2F)
+#define DLN2_SPI_SS_AAT_IS_ENABLED DLN2_SPI_CMD(0x30)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_ENABLE DLN2_SPI_CMD(0x31)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_DISABLE DLN2_SPI_CMD(0x32)
+#define DLN2_SPI_SS_BETWEEN_FRAMES_IS_ENABLED DLN2_SPI_CMD(0x33)
+#define DLN2_SPI_SET_CPHA DLN2_SPI_CMD(0x34)
+#define DLN2_SPI_GET_CPHA DLN2_SPI_CMD(0x35)
+#define DLN2_SPI_SET_CPOL DLN2_SPI_CMD(0x36)
+#define DLN2_SPI_GET_CPOL DLN2_SPI_CMD(0x37)
+#define DLN2_SPI_SS_MULTI_ENABLE DLN2_SPI_CMD(0x38)
+#define DLN2_SPI_SS_MULTI_DISABLE DLN2_SPI_CMD(0x39)
+#define DLN2_SPI_SS_MULTI_IS_ENABLED DLN2_SPI_CMD(0x3A)
+#define DLN2_SPI_GET_SUPPORTED_MODES DLN2_SPI_CMD(0x40)
+#define DLN2_SPI_GET_SUPPORTED_CPHA_VALUES DLN2_SPI_CMD(0x41)
+#define DLN2_SPI_GET_SUPPORTED_CPOL_VALUES DLN2_SPI_CMD(0x42)
+#define DLN2_SPI_GET_SUPPORTED_FRAME_SIZES DLN2_SPI_CMD(0x43)
+#define DLN2_SPI_GET_SS_COUNT DLN2_SPI_CMD(0x44)
+#define DLN2_SPI_GET_MIN_FREQUENCY DLN2_SPI_CMD(0x45)
+#define DLN2_SPI_GET_MAX_FREQUENCY DLN2_SPI_CMD(0x46)
+#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x47)
+#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_SS DLN2_SPI_CMD(0x48)
+#define DLN2_SPI_GET_MIN_DELAY_AFTER_SS DLN2_SPI_CMD(0x49)
+#define DLN2_SPI_GET_MAX_DELAY_AFTER_SS DLN2_SPI_CMD(0x4A)
+#define DLN2_SPI_GET_MIN_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4B)
+#define DLN2_SPI_GET_MAX_DELAY_BETWEEN_FRAMES DLN2_SPI_CMD(0x4C)
+
+#define DLN2_SPI_MAX_XFER_SIZE 256
+#define DLN2_SPI_BUF_SIZE (DLN2_SPI_MAX_XFER_SIZE + 16)
+#define DLN2_SPI_ATTR_LEAVE_SS_LOW BIT(0)
+#define DLN2_TRANSFERS_WAIT_COMPLETE 1
+#define DLN2_TRANSFERS_CANCEL 0
+#define DLN2_RPM_AUTOSUSPEND_TIMEOUT 2000
+
+struct dln2_spi {
+ struct platform_device *pdev;
+ struct spi_master *master;
+ u8 port;
+
+ /*
+ * This buffer will be used mainly for read/write operations. Since
+ * they're quite large, we cannot use the stack. Protection is not
+ * needed because all SPI communication is serialized by the SPI core.
+ */
+ void *buf;
+
+ u8 bpw;
+ u32 speed;
+ u16 mode;
+ u8 cs;
+};
+
+/*
+ * Enable/Disable SPI module. The disable command will wait for transfers to
+ * complete first.
+ */
+static int dln2_spi_enable(struct dln2_spi *dln2, bool enable)
+{
+ u16 cmd;
+ struct {
+ u8 port;
+ u8 wait_for_completion;
+ } tx;
+ unsigned len = sizeof(tx);
+
+ tx.port = dln2->port;
+
+ if (enable) {
+ cmd = DLN2_SPI_ENABLE;
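+ /* the enable command carries only the port byte, so drop the wait flag */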
+ len -= sizeof(tx.wait_for_completion);
+ } else {
+ tx.wait_for_completion = DLN2_TRANSFERS_WAIT_COMPLETE;
+ cmd = DLN2_SPI_DISABLE;
+ }
+
+ return dln2_transfer_tx(dln2->pdev, cmd, &tx, len);
+}
+
+/*
+ * Select/unselect multiple CS lines. The selected lines will be automatically
+ * toggled LOW/HIGH by the board firmware during transfers, provided they're
+ * enabled first.
+ *
+ * Ex: cs_mask = 0x03 -> CS0 & CS1 will be selected and the next WR/RD operation
+ * will toggle the lines LOW/HIGH automatically.
+ */
+static int dln2_spi_cs_set(struct dln2_spi *dln2, u8 cs_mask)
+{
+ struct {
+ u8 port;
+ u8 cs;
+ } tx;
+
+ tx.port = dln2->port;
+
+ /*
+ * According to Diolan docs, "a slave device can be selected by changing
+ * the corresponding bit value to 0". The rest must be set to 1. Hence
+ * the bitwise NOT in front.
+ */
+ tx.cs = ~cs_mask;
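+ /* e.g. cs_mask 0x01 becomes 0xfe: CS0 driven low (selected), the rest high */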
+
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_SS, &tx, sizeof(tx));
+}
+
+/*
+ * Select one CS line. The other lines will be un-selected.
+ */
+static int dln2_spi_cs_set_one(struct dln2_spi *dln2, u8 cs)
+{
+ return dln2_spi_cs_set(dln2, BIT(cs));
+}
+
+/*
+ * Enable/disable CS lines for usage. The module has to be disabled first.
+ */
+static int dln2_spi_cs_enable(struct dln2_spi *dln2, u8 cs_mask, bool enable)
+{
+ struct {
+ u8 port;
+ u8 cs;
+ } tx;
+ u16 cmd;
+
+ tx.port = dln2->port;
+ tx.cs = cs_mask;
+ cmd = enable ? DLN2_SPI_SS_MULTI_ENABLE : DLN2_SPI_SS_MULTI_DISABLE;
+
+ return dln2_transfer_tx(dln2->pdev, cmd, &tx, sizeof(tx));
+}
+
+static int dln2_spi_cs_enable_all(struct dln2_spi *dln2, bool enable)
+{
+ u8 cs_mask = GENMASK(dln2->master->num_chipselect - 1, 0);
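+ /* e.g. 3 chip selects give GENMASK(2, 0) = 0x07 */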
+
+ return dln2_spi_cs_enable(dln2, cs_mask, enable);
+}
+
+static int dln2_spi_get_cs_num(struct dln2_spi *dln2, u16 *cs_num)
+{
+ int ret;
+ struct {
+ u8 port;
+ } tx;
+ struct {
+ __le16 cs_count;
+ } rx;
+ unsigned rx_len = sizeof(rx);
+
+ tx.port = dln2->port;
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SS_COUNT, &tx, sizeof(tx),
+ &rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx))
+ return -EPROTO;
+
+ *cs_num = le16_to_cpu(rx.cs_count);
+
+ dev_dbg(&dln2->pdev->dev, "cs_num = %d\n", *cs_num);
+
+ return 0;
+}
+
+static int dln2_spi_get_speed(struct dln2_spi *dln2, u16 cmd, u32 *freq)
+{
+ int ret;
+ struct {
+ u8 port;
+ } tx;
+ struct {
+ __le32 speed;
+ } rx;
+ unsigned rx_len = sizeof(rx);
+
+ tx.port = dln2->port;
+
+ ret = dln2_transfer(dln2->pdev, cmd, &tx, sizeof(tx), &rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx))
+ return -EPROTO;
+
+ *freq = le32_to_cpu(rx.speed);
+
+ return 0;
+}
+
+/*
+ * Get bus min/max frequencies.
+ */
+static int dln2_spi_get_speed_range(struct dln2_spi *dln2, u32 *fmin, u32 *fmax)
+{
+ int ret;
+
+ ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MIN_FREQUENCY, fmin);
+ if (ret < 0)
+ return ret;
+
+ ret = dln2_spi_get_speed(dln2, DLN2_SPI_GET_MAX_FREQUENCY, fmax);
+ if (ret < 0)
+ return ret;
+
+ dev_dbg(&dln2->pdev->dev, "freq_min = %d, freq_max = %d\n",
+ *fmin, *fmax);
+
+ return 0;
+}
+
+/*
+ * Set the bus speed. The module will automatically round down to the closest
+ * available frequency and return it in its response. The module has to be
+ * disabled first.
+ */
+static int dln2_spi_set_speed(struct dln2_spi *dln2, u32 speed)
+{
+ int ret;
+ struct {
+ u8 port;
+ __le32 speed;
+ } __packed tx;
+ struct {
+ __le32 speed;
+ } rx;
+ unsigned rx_len = sizeof(rx);
+
+ tx.port = dln2->port;
+ tx.speed = cpu_to_le32(speed);
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_SET_FREQUENCY, &tx, sizeof(tx),
+ &rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx))
+ return -EPROTO;
+
+ return 0;
+}
+
+/*
+ * Change CPOL & CPHA. The module has to be disabled first.
+ */
+static int dln2_spi_set_mode(struct dln2_spi *dln2, u8 mode)
+{
+ struct {
+ u8 port;
+ u8 mode;
+ } tx;
+
+ tx.port = dln2->port;
+ tx.mode = mode;
+
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_MODE, &tx, sizeof(tx));
+}
+
+/*
+ * Change frame size. The module has to be disabled first.
+ */
+static int dln2_spi_set_bpw(struct dln2_spi *dln2, u8 bpw)
+{
+ struct {
+ u8 port;
+ u8 bpw;
+ } tx;
+
+ tx.port = dln2->port;
+ tx.bpw = bpw;
+
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_SET_FRAME_SIZE,
+ &tx, sizeof(tx));
+}
+
+static int dln2_spi_get_supported_frame_sizes(struct dln2_spi *dln2,
+ u32 *bpw_mask)
+{
+ int ret;
+ struct {
+ u8 port;
+ } tx;
+ struct {
+ u8 count;
+ u8 frame_sizes[36];
+ } *rx = dln2->buf;
+ unsigned rx_len = sizeof(*rx);
+ int i;
+
+ tx.port = dln2->port;
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_GET_SUPPORTED_FRAME_SIZES,
+ &tx, sizeof(tx), rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(*rx))
+ return -EPROTO;
+ if (rx->count > ARRAY_SIZE(rx->frame_sizes))
+ return -EPROTO;
+
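+ /* e.g. supported frame sizes {8, 16} set bits 7 and 15, matching SPI_BPW_MASK() */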
+ *bpw_mask = 0;
+ for (i = 0; i < rx->count; i++)
+ *bpw_mask |= BIT(rx->frame_sizes[i] - 1);
+
+ dev_dbg(&dln2->pdev->dev, "bpw_mask = 0x%X\n", *bpw_mask);
+
+ return 0;
+}
+
+/*
+ * Copy the data to the DLN2 buffer and change the byte order to LE, as
+ * required by the DLN2 module. The SPI core makes sure that the data length
+ * is a multiple of the word size.
+ */
+static int dln2_spi_copy_to_buf(u8 *dln2_buf, const u8 *src, u16 len, u8 bpw)
+{
+#ifdef __LITTLE_ENDIAN
+ memcpy(dln2_buf, src, len);
+#else
+ if (bpw <= 8) {
+ memcpy(dln2_buf, src, len);
+ } else if (bpw <= 16) {
+ __le16 *d = (__le16 *)dln2_buf;
+ u16 *s = (u16 *)src;
+
+ len = len / 2;
+ while (len--)
+ *d++ = cpu_to_le16p(s++);
+ } else {
+ __le32 *d = (__le32 *)dln2_buf;
+ u32 *s = (u32 *)src;
+
+ len = len / 4;
+ while (len--)
+ *d++ = cpu_to_le32p(s++);
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * Copy the data from DLN2 buffer and convert to CPU byte order since the DLN2
+ * buffer is LE ordered. SPI core makes sure that the data length is a multiple
+ * of word size. The RX dln2_buf is only 2-byte aligned, so for BE we have
+ * to avoid unaligned accesses in the 32-bit case.
+ */
+static int dln2_spi_copy_from_buf(u8 *dest, const u8 *dln2_buf, u16 len, u8 bpw)
+{
+#ifdef __LITTLE_ENDIAN
+ memcpy(dest, dln2_buf, len);
+#else
+ if (bpw <= 8) {
+ memcpy(dest, dln2_buf, len);
+ } else if (bpw <= 16) {
+ u16 *d = (u16 *)dest;
+ __le16 *s = (__le16 *)dln2_buf;
+
+ len = len / 2;
+ while (len--)
+ *d++ = le16_to_cpup(s++);
+ } else {
+ u32 *d = (u32 *)dest;
+ __le32 *s = (__le32 *)dln2_buf;
+
+ len = len / 4;
+ while (len--)
+ *d++ = get_unaligned_le32(s++);
+ }
+#endif
+
+ return 0;
+}
+
+/*
+ * Perform one write operation.
+ */
+static int dln2_spi_write_one(struct dln2_spi *dln2, const u8 *data,
+ u16 data_len, u8 attr)
+{
+ struct {
+ u8 port;
+ __le16 size;
+ u8 attr;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *tx = dln2->buf;
+ unsigned tx_len;
+
+ BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE);
+
+ if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ tx->port = dln2->port;
+ tx->size = cpu_to_le16(data_len);
+ tx->attr = attr;
+
+ dln2_spi_copy_to_buf(tx->buf, data, data_len, dln2->bpw);
+
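+ /* send only the used part of tx->buf: the header plus data_len payload bytes */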
+ tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
+ return dln2_transfer_tx(dln2->pdev, DLN2_SPI_WRITE, tx, tx_len);
+}
+
+/*
+ * Perform one read operation.
+ */
+static int dln2_spi_read_one(struct dln2_spi *dln2, u8 *data,
+ u16 data_len, u8 attr)
+{
+ int ret;
+ struct {
+ u8 port;
+ __le16 size;
+ u8 attr;
+ } __packed tx;
+ struct {
+ __le16 size;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *rx = dln2->buf;
+ unsigned rx_len = sizeof(*rx);
+
+ BUILD_BUG_ON(sizeof(*rx) > DLN2_SPI_BUF_SIZE);
+
+ if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ tx.port = dln2->port;
+ tx.size = cpu_to_le16(data_len);
+ tx.attr = attr;
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ, &tx, sizeof(tx),
+ rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx->size) + data_len)
+ return -EPROTO;
+ if (le16_to_cpu(rx->size) != data_len)
+ return -EPROTO;
+
+ dln2_spi_copy_from_buf(data, rx->buf, data_len, dln2->bpw);
+
+ return 0;
+}
+
+/*
+ * Perform one write & read operation.
+ */
+static int dln2_spi_read_write_one(struct dln2_spi *dln2, const u8 *tx_data,
+ u8 *rx_data, u16 data_len, u8 attr)
+{
+ int ret;
+ struct {
+ u8 port;
+ __le16 size;
+ u8 attr;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *tx;
+ struct {
+ __le16 size;
+ u8 buf[DLN2_SPI_MAX_XFER_SIZE];
+ } __packed *rx;
+ unsigned tx_len, rx_len;
+
+ BUILD_BUG_ON(sizeof(*tx) > DLN2_SPI_BUF_SIZE ||
+ sizeof(*rx) > DLN2_SPI_BUF_SIZE);
+
+ if (data_len > DLN2_SPI_MAX_XFER_SIZE)
+ return -EINVAL;
+
+ /*
+ * Since this is a pseudo full-duplex communication, we're perfectly
+ * safe to use the same buffer for both tx and rx. When DLN2 sends the
+ * response back, with the rx data, we don't need the tx buffer anymore.
+ */
+ tx = dln2->buf;
+ rx = dln2->buf;
+
+ tx->port = dln2->port;
+ tx->size = cpu_to_le16(data_len);
+ tx->attr = attr;
+
+ dln2_spi_copy_to_buf(tx->buf, tx_data, data_len, dln2->bpw);
+
+ tx_len = sizeof(*tx) + data_len - DLN2_SPI_MAX_XFER_SIZE;
+ rx_len = sizeof(*rx);
+
+ ret = dln2_transfer(dln2->pdev, DLN2_SPI_READ_WRITE, tx, tx_len,
+ rx, &rx_len);
+ if (ret < 0)
+ return ret;
+ if (rx_len < sizeof(rx->size) + data_len)
+ return -EPROTO;
+ if (le16_to_cpu(rx->size) != data_len)
+ return -EPROTO;
+
+ dln2_spi_copy_from_buf(rx_data, rx->buf, data_len, dln2->bpw);
+
+ return 0;
+}
+
+/*
+ * Read/Write wrapper. It will automatically split an operation into multiple
+ * single ones due to device buffer constraints.
+ */
+static int dln2_spi_rdwr(struct dln2_spi *dln2, const u8 *tx_data,
+ u8 *rx_data, u16 data_len, u8 attr)
+{
+ int ret;
+ u16 len;
+ u8 temp_attr;
+ u16 remaining = data_len;
+ u16 offset;
+
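+ /*
+ * Example: a 600-byte transfer goes out as 256 + 256 + 88 byte chunks,
+ * keeping SS asserted (DLN2_SPI_ATTR_LEAVE_SS_LOW) between all but the
+ * last chunk, which uses the caller's attr.
+ */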
+ do {
+ if (remaining > DLN2_SPI_MAX_XFER_SIZE) {
+ len = DLN2_SPI_MAX_XFER_SIZE;
+ temp_attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
+ } else {
+ len = remaining;
+ temp_attr = attr;
+ }
+
+ offset = data_len - remaining;
+
+ if (tx_data && rx_data) {
+ ret = dln2_spi_read_write_one(dln2,
+ tx_data + offset,
+ rx_data + offset,
+ len, temp_attr);
+ } else if (tx_data) {
+ ret = dln2_spi_write_one(dln2,
+ tx_data + offset,
+ len, temp_attr);
+ } else if (rx_data) {
+ ret = dln2_spi_read_one(dln2,
+ rx_data + offset,
+ len, temp_attr);
+ } else {
+ return -EINVAL;
+ }
+
+ if (ret < 0)
+ return ret;
+
+ remaining -= len;
+ } while (remaining);
+
+ return 0;
+}
+
+static int dln2_spi_prepare_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ int ret;
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+
+ if (dln2->cs != spi->chip_select) {
+ ret = dln2_spi_cs_set_one(dln2, spi->chip_select);
+ if (ret < 0)
+ return ret;
+
+ dln2->cs = spi->chip_select;
+ }
+
+ return 0;
+}
+
+static int dln2_spi_transfer_setup(struct dln2_spi *dln2, u32 speed,
+ u8 bpw, u8 mode)
+{
+ int ret;
+ bool bus_setup_change;
+
+ bus_setup_change = dln2->speed != speed || dln2->mode != mode ||
+ dln2->bpw != bpw;
+
+ if (!bus_setup_change)
+ return 0;
+
+ ret = dln2_spi_enable(dln2, false);
+ if (ret < 0)
+ return ret;
+
+ if (dln2->speed != speed) {
+ ret = dln2_spi_set_speed(dln2, speed);
+ if (ret < 0)
+ return ret;
+
+ dln2->speed = speed;
+ }
+
+ if (dln2->mode != mode) {
+ ret = dln2_spi_set_mode(dln2, mode & 0x3);
+ if (ret < 0)
+ return ret;
+
+ dln2->mode = mode;
+ }
+
+ if (dln2->bpw != bpw) {
+ ret = dln2_spi_set_bpw(dln2, bpw);
+ if (ret < 0)
+ return ret;
+
+ dln2->bpw = bpw;
+ }
+
+ return dln2_spi_enable(dln2, true);
+}
+
+static int dln2_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+ int status;
+ u8 attr = 0;
+
+ status = dln2_spi_transfer_setup(dln2, xfer->speed_hz,
+ xfer->bits_per_word,
+ spi->mode);
+ if (status < 0) {
+ dev_err(&dln2->pdev->dev, "Cannot setup transfer\n");
+ return status;
+ }
+
+ if (!xfer->cs_change && !spi_transfer_is_last(master, xfer))
+ attr = DLN2_SPI_ATTR_LEAVE_SS_LOW;
+
+ status = dln2_spi_rdwr(dln2, xfer->tx_buf, xfer->rx_buf,
+ xfer->len, attr);
+ if (status < 0)
+ dev_err(&dln2->pdev->dev, "write/read failed!\n");
+
+ return status;
+}
+
+static int dln2_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct dln2_spi *dln2;
+ struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*dln2));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ dln2 = spi_master_get_devdata(master);
+
+ dln2->buf = devm_kmalloc(&pdev->dev, DLN2_SPI_BUF_SIZE, GFP_KERNEL);
+ if (!dln2->buf) {
+ ret = -ENOMEM;
+ goto exit_free_master;
+ }
+
+ dln2->master = master;
+ dln2->pdev = pdev;
+ dln2->port = pdata->port;
+ /* cs/mode can never be 0xff, so the first transfer will set them */
+ dln2->cs = 0xff;
+ dln2->mode = 0xff;
+
+ /* disable SPI module before continuing with the setup */
+ ret = dln2_spi_enable(dln2, false);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to disable SPI module\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_get_cs_num(dln2, &master->num_chipselect);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to get number of CS pins\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_get_speed_range(dln2,
+ &master->min_speed_hz,
+ &master->max_speed_hz);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to read bus min/max freqs\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_get_supported_frame_sizes(dln2,
+ &master->bits_per_word_mask);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to read supported frame sizes\n");
+ goto exit_free_master;
+ }
+
+ ret = dln2_spi_cs_enable_all(dln2, true);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable CS pins\n");
+ goto exit_free_master;
+ }
+
+ master->bus_num = -1;
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->prepare_message = dln2_spi_prepare_message;
+ master->transfer_one = dln2_spi_transfer_one;
+ master->auto_runtime_pm = true;
+
+ /* enable SPI module, we're good to go */
+ ret = dln2_spi_enable(dln2, true);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to enable SPI module\n");
+ goto exit_free_master;
+ }
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ DLN2_RPM_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register master\n");
+ goto exit_register;
+ }
+
+ return ret;
+
+exit_register:
+ pm_runtime_disable(&pdev->dev);
+ pm_runtime_set_suspended(&pdev->dev);
+
+ if (dln2_spi_enable(dln2, false) < 0)
+ dev_err(&pdev->dev, "Failed to disable SPI module\n");
+exit_free_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int dln2_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ if (dln2_spi_enable(dln2, false) < 0)
+ dev_err(&pdev->dev, "Failed to disable SPI module\n");
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int dln2_spi_suspend(struct device *dev)
+{
+ int ret;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ ret = spi_master_suspend(master);
+ if (ret < 0)
+ return ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = dln2_spi_enable(dln2, false);
+ if (ret < 0)
+ return ret;
+ }
+
+ /*
+ * USB power may be cut off during sleep. Resetting the following
+ * parameters will force the board to be set up before first transfer.
+ */
+ dln2->cs = 0xff;
+ dln2->speed = 0;
+ dln2->bpw = 0;
+ dln2->mode = 0xff;
+
+ return 0;
+}
+
+static int dln2_spi_resume(struct device *dev)
+{
+ int ret;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = dln2_spi_cs_enable_all(dln2, true);
+ if (ret < 0)
+ return ret;
+
+ ret = dln2_spi_enable(dln2, true);
+ if (ret < 0)
+ return ret;
+ }
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int dln2_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ return dln2_spi_enable(dln2, false);
+}
+
+static int dln2_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct dln2_spi *dln2 = spi_master_get_devdata(master);
+
+ return dln2_spi_enable(dln2, true);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops dln2_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(dln2_spi_suspend, dln2_spi_resume)
+ SET_RUNTIME_PM_OPS(dln2_spi_runtime_suspend,
+ dln2_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver spi_dln2_driver = {
+ .driver = {
+ .name = "dln2-spi",
+ .pm = &dln2_spi_pm,
+ },
+ .probe = dln2_spi_probe,
+ .remove = dln2_spi_remove,
+};
+module_platform_driver(spi_dln2_driver);
+
+MODULE_DESCRIPTION("Driver for the Diolan DLN2 SPI master interface");
+MODULE_AUTHOR("Laurentiu Palcu <laurentiu.palcu@intel.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:dln2-spi");
diff --git a/drivers/spi/spi-dw-mid.c b/drivers/spi/spi-dw-mid.c
new file mode 100644
index 000000000..bb1052e74
--- /dev/null
+++ b/drivers/spi/spi-dw-mid.c
@@ -0,0 +1,330 @@
+/*
+ * Special handling for DW core on Intel MID platform
+ *
+ * Copyright (c) 2009, 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+#include "spi-dw.h"
+
+#ifdef CONFIG_SPI_DW_MID_DMA
+#include <linux/pci.h>
+#include <linux/platform_data/dma-dw.h>
+
+#define RX_BUSY 0
+#define TX_BUSY 1
+
+static struct dw_dma_slave mid_dma_tx = { .dst_id = 1 };
+static struct dw_dma_slave mid_dma_rx = { .src_id = 0 };
+
+static bool mid_spi_dma_chan_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_slave *s = param;
+
+ if (s->dma_dev != chan->device->dev)
+ return false;
+
+ chan->private = s;
+ return true;
+}
+
+static int mid_spi_dma_init(struct dw_spi *dws)
+{
+ struct pci_dev *dma_dev;
+ struct dw_dma_slave *tx = dws->dma_tx;
+ struct dw_dma_slave *rx = dws->dma_rx;
+ dma_cap_mask_t mask;
+
+ /*
+ * Get the PCI device for the DMA controller; currently it can only
+ * be the DMA controller of Intel Medfield
+ */
+ dma_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x0827, NULL);
+ if (!dma_dev)
+ return -ENODEV;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* 1. Init rx channel */
+ rx->dma_dev = &dma_dev->dev;
+ dws->rxchan = dma_request_channel(mask, mid_spi_dma_chan_filter, rx);
+ if (!dws->rxchan)
+ goto err_exit;
+ dws->master->dma_rx = dws->rxchan;
+
+ /* 2. Init tx channel */
+ tx->dma_dev = &dma_dev->dev;
+ dws->txchan = dma_request_channel(mask, mid_spi_dma_chan_filter, tx);
+ if (!dws->txchan)
+ goto free_rxchan;
+ dws->master->dma_tx = dws->txchan;
+
+ dws->dma_inited = 1;
+ return 0;
+
+free_rxchan:
+ dma_release_channel(dws->rxchan);
+err_exit:
+ return -EBUSY;
+}
+
+static void mid_spi_dma_exit(struct dw_spi *dws)
+{
+ if (!dws->dma_inited)
+ return;
+
+ dmaengine_terminate_all(dws->txchan);
+ dma_release_channel(dws->txchan);
+
+ dmaengine_terminate_all(dws->rxchan);
+ dma_release_channel(dws->rxchan);
+}
+
+static irqreturn_t dma_transfer(struct dw_spi *dws)
+{
+ u16 irq_status = dw_readl(dws, DW_SPI_ISR);
+
+ if (!irq_status)
+ return IRQ_NONE;
+
+ dw_readl(dws, DW_SPI_ICR);
+ spi_reset_chip(dws);
+
+ dev_err(&dws->master->dev, "%s: FIFO overrun/underrun\n", __func__);
+ dws->master->cur_msg->status = -EIO;
+ spi_finalize_current_transfer(dws->master);
+ return IRQ_HANDLED;
+}
+
+static bool mid_spi_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct dw_spi *dws = spi_master_get_devdata(master);
+
+ if (!dws->dma_inited)
+ return false;
+
+ return xfer->len > dws->fifo_len;
+}
+
+static enum dma_slave_buswidth convert_dma_width(u32 dma_width)
+{
+ if (dma_width == 1)
+ return DMA_SLAVE_BUSWIDTH_1_BYTE;
+ else if (dma_width == 2)
+ return DMA_SLAVE_BUSWIDTH_2_BYTES;
+
+ return DMA_SLAVE_BUSWIDTH_UNDEFINED;
+}
+
+/*
+ * dws->dma_chan_busy is set before the dma transfer starts, callback for tx
+ * channel will clear a corresponding bit.
+ */
+static void dw_spi_dma_tx_done(void *arg)
+{
+ struct dw_spi *dws = arg;
+
+ clear_bit(TX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(RX_BUSY, &dws->dma_chan_busy))
+ return;
+ spi_finalize_current_transfer(dws->master);
+}
+
+static struct dma_async_tx_descriptor *dw_spi_dma_prepare_tx(struct dw_spi *dws,
+ struct spi_transfer *xfer)
+{
+ struct dma_slave_config txconf;
+ struct dma_async_tx_descriptor *txdesc;
+
+ if (!xfer->tx_buf)
+ return NULL;
+
+ txconf.direction = DMA_MEM_TO_DEV;
+ txconf.dst_addr = dws->dma_addr;
+ txconf.dst_maxburst = 16;
+ txconf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ txconf.dst_addr_width = convert_dma_width(dws->dma_width);
+ txconf.device_fc = false;
+
+ dmaengine_slave_config(dws->txchan, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(dws->txchan,
+ xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ return NULL;
+
+ txdesc->callback = dw_spi_dma_tx_done;
+ txdesc->callback_param = dws;
+
+ return txdesc;
+}
+
+/*
+ * dws->dma_chan_busy is set before the dma transfer starts, callback for rx
+ * channel will clear a corresponding bit.
+ */
+static void dw_spi_dma_rx_done(void *arg)
+{
+ struct dw_spi *dws = arg;
+
+ clear_bit(RX_BUSY, &dws->dma_chan_busy);
+ if (test_bit(TX_BUSY, &dws->dma_chan_busy))
+ return;
+ spi_finalize_current_transfer(dws->master);
+}
+
+static struct dma_async_tx_descriptor *dw_spi_dma_prepare_rx(struct dw_spi *dws,
+ struct spi_transfer *xfer)
+{
+ struct dma_slave_config rxconf;
+ struct dma_async_tx_descriptor *rxdesc;
+
+ if (!xfer->rx_buf)
+ return NULL;
+
+ rxconf.direction = DMA_DEV_TO_MEM;
+ rxconf.src_addr = dws->dma_addr;
+ rxconf.src_maxburst = 16;
+ rxconf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ rxconf.src_addr_width = convert_dma_width(dws->dma_width);
+ rxconf.device_fc = false;
+
+ dmaengine_slave_config(dws->rxchan, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(dws->rxchan,
+ xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ return NULL;
+
+ rxdesc->callback = dw_spi_dma_rx_done;
+ rxdesc->callback_param = dws;
+
+ return rxdesc;
+}
+
+static int mid_spi_dma_setup(struct dw_spi *dws, struct spi_transfer *xfer)
+{
+ u16 dma_ctrl = 0;
+
+ dw_writel(dws, DW_SPI_DMARDLR, 0xf);
+ dw_writel(dws, DW_SPI_DMATDLR, 0x10);
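+ /* these thresholds line up with the 16-entry maxburst set on both DMA channels */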
+
+ if (xfer->tx_buf)
+ dma_ctrl |= SPI_DMA_TDMAE;
+ if (xfer->rx_buf)
+ dma_ctrl |= SPI_DMA_RDMAE;
+ dw_writel(dws, DW_SPI_DMACR, dma_ctrl);
+
+ /* Set the interrupt mask */
+ spi_umask_intr(dws, SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI);
+
+ dws->transfer_handler = dma_transfer;
+
+ return 0;
+}
+
+static int mid_spi_dma_transfer(struct dw_spi *dws, struct spi_transfer *xfer)
+{
+ struct dma_async_tx_descriptor *txdesc, *rxdesc;
+
+ /* Prepare the TX dma transfer */
+ txdesc = dw_spi_dma_prepare_tx(dws, xfer);
+
+ /* Prepare the RX dma transfer */
+ rxdesc = dw_spi_dma_prepare_rx(dws, xfer);
+
+ /* rx must be started before tx: data is clocked in as soon as tx starts */
+ if (rxdesc) {
+ set_bit(RX_BUSY, &dws->dma_chan_busy);
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(dws->rxchan);
+ }
+
+ if (txdesc) {
+ set_bit(TX_BUSY, &dws->dma_chan_busy);
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(dws->txchan);
+ }
+
+ return 0;
+}
+
+static void mid_spi_dma_stop(struct dw_spi *dws)
+{
+ if (test_bit(TX_BUSY, &dws->dma_chan_busy)) {
+ dmaengine_terminate_all(dws->txchan);
+ clear_bit(TX_BUSY, &dws->dma_chan_busy);
+ }
+ if (test_bit(RX_BUSY, &dws->dma_chan_busy)) {
+ dmaengine_terminate_all(dws->rxchan);
+ clear_bit(RX_BUSY, &dws->dma_chan_busy);
+ }
+}
+
+static struct dw_spi_dma_ops mid_dma_ops = {
+ .dma_init = mid_spi_dma_init,
+ .dma_exit = mid_spi_dma_exit,
+ .dma_setup = mid_spi_dma_setup,
+ .can_dma = mid_spi_can_dma,
+ .dma_transfer = mid_spi_dma_transfer,
+ .dma_stop = mid_spi_dma_stop,
+};
+#endif
+
+/* Some specific info for SPI0 controller on Intel MID */
+
+/* HW info for MRST Clk Control Unit, 32b reg per controller */
+#define MRST_SPI_CLK_BASE 100000000 /* 100 MHz */
+#define MRST_CLK_SPI_REG 0xff11d86c
+#define CLK_SPI_BDIV_OFFSET 0
+#define CLK_SPI_BDIV_MASK 0x00000007
+#define CLK_SPI_CDIV_OFFSET 9
+#define CLK_SPI_CDIV_MASK 0x00000e00
+#define CLK_SPI_DISABLE_OFFSET 8
+
+int dw_spi_mid_init(struct dw_spi *dws)
+{
+ void __iomem *clk_reg;
+ u32 clk_cdiv;
+
+ clk_reg = ioremap_nocache(MRST_CLK_SPI_REG, 16);
+ if (!clk_reg)
+ return -ENOMEM;
+
+ /* Get SPI controller operating freq info */
+ clk_cdiv = readl(clk_reg + dws->bus_num * sizeof(u32));
+ clk_cdiv &= CLK_SPI_CDIV_MASK;
+ clk_cdiv >>= CLK_SPI_CDIV_OFFSET;
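+ /* e.g. a CDIV field of 3 gives 100 MHz / (3 + 1) = 25 MHz */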
+ dws->max_freq = MRST_SPI_CLK_BASE / (clk_cdiv + 1);
+
+ iounmap(clk_reg);
+
+#ifdef CONFIG_SPI_DW_MID_DMA
+ dws->dma_tx = &mid_dma_tx;
+ dws->dma_rx = &mid_dma_rx;
+ dws->dma_ops = &mid_dma_ops;
+#endif
+ return 0;
+}
diff --git a/drivers/spi/spi-dw-mmio.c b/drivers/spi/spi-dw-mmio.c
new file mode 100644
index 000000000..eb03e1215
--- /dev/null
+++ b/drivers/spi/spi-dw-mmio.c
@@ -0,0 +1,145 @@
+/*
+ * Memory-mapped interface driver for DW SPI Core
+ *
+ * Copyright (c) 2010, Octasic semiconductor.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/scatterlist.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+
+#include "spi-dw.h"
+
+#define DRIVER_NAME "dw_spi_mmio"
+
+struct dw_spi_mmio {
+ struct dw_spi dws;
+ struct clk *clk;
+};
+
+static int dw_spi_mmio_probe(struct platform_device *pdev)
+{
+ struct dw_spi_mmio *dwsmmio;
+ struct dw_spi *dws;
+ struct resource *mem;
+ int ret;
+ u32 num_cs;
+
+ dwsmmio = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_mmio),
+ GFP_KERNEL);
+ if (!dwsmmio)
+ return -ENOMEM;
+
+ dws = &dwsmmio->dws;
+
+ /* Get basic io resource and map it */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "no mem resource?\n");
+ return -EINVAL;
+ }
+
+ dws->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(dws->regs)) {
+ dev_err(&pdev->dev, "SPI region map failed\n");
+ return PTR_ERR(dws->regs);
+ }
+
+ dws->irq = platform_get_irq(pdev, 0);
+ if (dws->irq < 0) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return dws->irq; /* -ENXIO */
+ }
+
+ dwsmmio->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dwsmmio->clk))
+ return PTR_ERR(dwsmmio->clk);
+ ret = clk_prepare_enable(dwsmmio->clk);
+ if (ret)
+ return ret;
+
+ dws->bus_num = pdev->id;
+
+ dws->max_freq = clk_get_rate(dwsmmio->clk);
+
+ num_cs = 4;
+
+ if (pdev->dev.of_node)
+ of_property_read_u32(pdev->dev.of_node, "num-cs", &num_cs);
+
+ dws->num_cs = num_cs;
+
+ if (pdev->dev.of_node) {
+ int i;
+
+ for (i = 0; i < dws->num_cs; i++) {
+ int cs_gpio = of_get_named_gpio(pdev->dev.of_node,
+ "cs-gpios", i);
+
+ if (cs_gpio == -EPROBE_DEFER) {
+ ret = cs_gpio;
+ goto out;
+ }
+
+ if (gpio_is_valid(cs_gpio)) {
+ ret = devm_gpio_request(&pdev->dev, cs_gpio,
+ dev_name(&pdev->dev));
+ if (ret)
+ goto out;
+ }
+ }
+ }
+
+ ret = dw_spi_add_host(&pdev->dev, dws);
+ if (ret)
+ goto out;
+
+ platform_set_drvdata(pdev, dwsmmio);
+ return 0;
+
+out:
+ clk_disable_unprepare(dwsmmio->clk);
+ return ret;
+}
+
+static int dw_spi_mmio_remove(struct platform_device *pdev)
+{
+ struct dw_spi_mmio *dwsmmio = platform_get_drvdata(pdev);
+
+ dw_spi_remove_host(&dwsmmio->dws);
+ clk_disable_unprepare(dwsmmio->clk);
+
+ return 0;
+}
+
+static const struct of_device_id dw_spi_mmio_of_match[] = {
+ { .compatible = "snps,dw-apb-ssi", },
+ { /* end of table */}
+};
+MODULE_DEVICE_TABLE(of, dw_spi_mmio_of_match);
+
+static struct platform_driver dw_spi_mmio_driver = {
+ .probe = dw_spi_mmio_probe,
+ .remove = dw_spi_mmio_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = dw_spi_mmio_of_match,
+ },
+};
+module_platform_driver(dw_spi_mmio_driver);
+
+MODULE_AUTHOR("Jean-Hugues Deschenes <jean-hugues.deschenes@octasic.com>");
+MODULE_DESCRIPTION("Memory-mapped I/O interface driver for DW SPI Core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-dw-pci.c b/drivers/spi/spi-dw-pci.c
new file mode 100644
index 000000000..6d331e0db
--- /dev/null
+++ b/drivers/spi/spi-dw-pci.c
@@ -0,0 +1,164 @@
+/*
+ * PCI interface driver for DW SPI Core
+ *
+ * Copyright (c) 2009, 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+
+#include "spi-dw.h"
+
+#define DRIVER_NAME "dw_spi_pci"
+
+struct dw_spi_pci {
+ struct pci_dev *pdev;
+ struct dw_spi dws;
+};
+
+struct spi_pci_desc {
+ int (*setup)(struct dw_spi *);
+ u16 num_cs;
+ u16 bus_num;
+};
+
+static struct spi_pci_desc spi_pci_mid_desc_1 = {
+ .setup = dw_spi_mid_init,
+ .num_cs = 5,
+ .bus_num = 0,
+};
+
+static struct spi_pci_desc spi_pci_mid_desc_2 = {
+ .setup = dw_spi_mid_init,
+ .num_cs = 2,
+ .bus_num = 1,
+};
+
+static int spi_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ struct dw_spi_pci *dwpci;
+ struct dw_spi *dws;
+ struct spi_pci_desc *desc = (struct spi_pci_desc *)ent->driver_data;
+ int pci_bar = 0;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret)
+ return ret;
+
+ dwpci = devm_kzalloc(&pdev->dev, sizeof(struct dw_spi_pci),
+ GFP_KERNEL);
+ if (!dwpci)
+ return -ENOMEM;
+
+ dwpci->pdev = pdev;
+ dws = &dwpci->dws;
+
+ /* Get basic io resource and map it */
+ dws->paddr = pci_resource_start(pdev, pci_bar);
+
+ ret = pcim_iomap_regions(pdev, 1 << pci_bar, pci_name(pdev));
+ if (ret)
+ return ret;
+
+ dws->regs = pcim_iomap_table(pdev)[pci_bar];
+
+ dws->irq = pdev->irq;
+
+ /*
+ * Platform specific handling, like DMA setup,
+ * clock rate and FIFO depth.
+ */
+ if (desc) {
+ dws->num_cs = desc->num_cs;
+ dws->bus_num = desc->bus_num;
+
+ if (desc->setup) {
+ ret = desc->setup(dws);
+ if (ret)
+ return ret;
+ }
+ } else {
+ return -ENODEV;
+ }
+
+ ret = dw_spi_add_host(&pdev->dev, dws);
+ if (ret)
+ return ret;
+
+ /* PCI hook and SPI hook use the same drv data */
+ pci_set_drvdata(pdev, dwpci);
+
+ dev_info(&pdev->dev, "found PCI SPI controller (ID: %04x:%04x)\n",
+ pdev->vendor, pdev->device);
+
+ return 0;
+}
+
+static void spi_pci_remove(struct pci_dev *pdev)
+{
+ struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+
+ dw_spi_remove_host(&dwpci->dws);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_suspend(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+
+ return dw_spi_suspend_host(&dwpci->dws);
+}
+
+static int spi_resume(struct device *dev)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct dw_spi_pci *dwpci = pci_get_drvdata(pdev);
+
+ return dw_spi_resume_host(&dwpci->dws);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(dw_spi_pm_ops, spi_suspend, spi_resume);
+
+static const struct pci_device_id pci_ids[] = {
+ /* Intel MID platform SPI controller 0 */
+ /*
+ * The access to the device 8086:0801 is disabled by HW, since it's
+ * exclusively used by SCU to communicate with MSIC.
+ */
+ /* Intel MID platform SPI controller 1 */
+ { PCI_VDEVICE(INTEL, 0x0800), (kernel_ulong_t)&spi_pci_mid_desc_1},
+ /* Intel MID platform SPI controller 2 */
+ { PCI_VDEVICE(INTEL, 0x0812), (kernel_ulong_t)&spi_pci_mid_desc_2},
+ {},
+};
+
+static struct pci_driver dw_spi_driver = {
+ .name = DRIVER_NAME,
+ .id_table = pci_ids,
+ .probe = spi_pci_probe,
+ .remove = spi_pci_remove,
+ .driver = {
+ .pm = &dw_spi_pm_ops,
+ },
+};
+
+module_pci_driver(dw_spi_driver);
+
+MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
+MODULE_DESCRIPTION("PCI interface driver for DW SPI Core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
new file mode 100644
index 000000000..8d67d03c7
--- /dev/null
+++ b/drivers/spi/spi-dw.c
@@ -0,0 +1,623 @@
+/*
+ * Designware SPI core controller driver (refer pxa2xx_spi.c)
+ *
+ * Copyright (c) 2009, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/highmem.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+
+#include "spi-dw.h"
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#endif
+
+/* Slave spi_dev related */
+struct chip_data {
+ u16 cr0;
+ u8 cs; /* chip select pin */
+ u8 n_bytes; /* current is a 1/2/4 byte op */
+ u8 tmode; /* TR/TO/RO/EEPROM */
+ u8 type; /* SPI/SSP/MicroWire */
+
+ u8 poll_mode; /* 1 means use poll mode */
+
+ u32 dma_width;
+ u32 rx_threshold;
+ u32 tx_threshold;
+ u8 enable_dma;
+ u8 bits_per_word;
+ u16 clk_div; /* baud rate divider */
+ u32 speed_hz; /* baud rate */
+ void (*cs_control)(u32 command);
+};
+
+#ifdef CONFIG_DEBUG_FS
+#define SPI_REGS_BUFSIZE 1024
+static ssize_t dw_spi_show_regs(struct file *file, char __user *user_buf,
+ size_t count, loff_t *ppos)
+{
+ struct dw_spi *dws = file->private_data;
+ char *buf;
+ u32 len = 0;
+ ssize_t ret;
+
+ buf = kzalloc(SPI_REGS_BUFSIZE, GFP_KERNEL);
+ if (!buf)
+ return 0;
+
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "%s registers:\n", dev_name(&dws->master->dev));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "=================================\n");
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "CTRL0: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL0));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "CTRL1: \t\t0x%08x\n", dw_readl(dws, DW_SPI_CTRL1));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "SSIENR: \t0x%08x\n", dw_readl(dws, DW_SPI_SSIENR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "SER: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SER));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "BAUDR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_BAUDR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "TXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_TXFLTR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "RXFTLR: \t0x%08x\n", dw_readl(dws, DW_SPI_RXFLTR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "TXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_TXFLR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "RXFLR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_RXFLR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "SR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_SR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "IMR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_IMR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "ISR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_ISR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "DMACR: \t\t0x%08x\n", dw_readl(dws, DW_SPI_DMACR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "DMATDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMATDLR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "DMARDLR: \t0x%08x\n", dw_readl(dws, DW_SPI_DMARDLR));
+ len += snprintf(buf + len, SPI_REGS_BUFSIZE - len,
+ "=================================\n");
+
+ ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
+ kfree(buf);
+ return ret;
+}
+
+static const struct file_operations dw_spi_regs_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .read = dw_spi_show_regs,
+ .llseek = default_llseek,
+};
+
+static int dw_spi_debugfs_init(struct dw_spi *dws)
+{
+ dws->debugfs = debugfs_create_dir("dw_spi", NULL);
+ if (!dws->debugfs)
+ return -ENOMEM;
+
+ debugfs_create_file("registers", S_IFREG | S_IRUGO,
+ dws->debugfs, (void *)dws, &dw_spi_regs_ops);
+ return 0;
+}
+
+static void dw_spi_debugfs_remove(struct dw_spi *dws)
+{
+ debugfs_remove_recursive(dws->debugfs);
+}
+
+#else
+static inline int dw_spi_debugfs_init(struct dw_spi *dws)
+{
+ return 0;
+}
+
+static inline void dw_spi_debugfs_remove(struct dw_spi *dws)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static void dw_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct dw_spi *dws = spi_master_get_devdata(spi->master);
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ /* Chip select logic is inverted from spi_set_cs() */
+ if (chip && chip->cs_control)
+ chip->cs_control(!enable);
+
+ if (!enable)
+ dw_writel(dws, DW_SPI_SER, BIT(spi->chip_select));
+}
+
+/* Return the max entries we can fill into tx fifo */
+static inline u32 tx_max(struct dw_spi *dws)
+{
+ u32 tx_left, tx_room, rxtx_gap;
+
+ tx_left = (dws->tx_end - dws->tx) / dws->n_bytes;
+ tx_room = dws->fifo_len - dw_readl(dws, DW_SPI_TXFLR);
+
+ /*
+ * Another concern is the tx/rx mismatch: we could use
+ * (dws->fifo_len - rxflr - txflr) as the maximum for tx, but that
+ * doesn't cover data which has left the tx/rx FIFOs but is still in
+ * the shift registers. So a software-side limit is applied: e.g. with
+ * a gap of 8 words between rx and tx progress, at most fifo_len - 8
+ * further entries may be queued.
+ */
+ rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx))
+ / dws->n_bytes;
+
+ return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap));
+}
+
+/* Return the max entries we should read out of rx fifo */
+static inline u32 rx_max(struct dw_spi *dws)
+{
+ u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes;
+
+ return min_t(u32, rx_left, dw_readl(dws, DW_SPI_RXFLR));
+}
+
+static void dw_writer(struct dw_spi *dws)
+{
+ u32 max = tx_max(dws);
+ u16 txw = 0;
+
+ while (max--) {
+ /* Set the tx word if the transfer's original "tx" is not null */
+ if (dws->tx_end - dws->len) {
+ if (dws->n_bytes == 1)
+ txw = *(u8 *)(dws->tx);
+ else
+ txw = *(u16 *)(dws->tx);
+ }
+ dw_writel(dws, DW_SPI_DR, txw);
+ dws->tx += dws->n_bytes;
+ }
+}
+
+static void dw_reader(struct dw_spi *dws)
+{
+ u32 max = rx_max(dws);
+ u16 rxw;
+
+ while (max--) {
+ rxw = dw_readl(dws, DW_SPI_DR);
+ /* Store rx data only if the transfer's original "rx" is not null */
+ if (dws->rx_end - dws->len) {
+ if (dws->n_bytes == 1)
+ *(u8 *)(dws->rx) = rxw;
+ else
+ *(u16 *)(dws->rx) = rxw;
+ }
+ dws->rx += dws->n_bytes;
+ }
+}
+
+static void int_error_stop(struct dw_spi *dws, const char *msg)
+{
+ spi_reset_chip(dws);
+
+ dev_err(&dws->master->dev, "%s\n", msg);
+ dws->master->cur_msg->status = -EIO;
+ spi_finalize_current_transfer(dws->master);
+}
+
+static irqreturn_t interrupt_transfer(struct dw_spi *dws)
+{
+ u16 irq_status = dw_readl(dws, DW_SPI_ISR);
+
+ /* Error handling */
+ if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) {
+ dw_readl(dws, DW_SPI_ICR);
+ int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun");
+ return IRQ_HANDLED;
+ }
+
+ dw_reader(dws);
+ if (dws->rx_end == dws->rx) {
+ spi_mask_intr(dws, SPI_INT_TXEI);
+ spi_finalize_current_transfer(dws->master);
+ return IRQ_HANDLED;
+ }
+ if (irq_status & SPI_INT_TXEI) {
+ spi_mask_intr(dws, SPI_INT_TXEI);
+ dw_writer(dws);
+ /* Enable TX irq always, it will be disabled when RX finished */
+ spi_umask_intr(dws, SPI_INT_TXEI);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t dw_spi_irq(int irq, void *dev_id)
+{
+ struct spi_master *master = dev_id;
+ struct dw_spi *dws = spi_master_get_devdata(master);
+ u16 irq_status = dw_readl(dws, DW_SPI_ISR) & 0x3f;
+
+ if (!irq_status)
+ return IRQ_NONE;
+
+ if (!master->cur_msg) {
+ spi_mask_intr(dws, SPI_INT_TXEI);
+ return IRQ_HANDLED;
+ }
+
+ return dws->transfer_handler(dws);
+}
+
+/* Must be called inside pump_transfers() */
+static int poll_transfer(struct dw_spi *dws)
+{
+ do {
+ dw_writer(dws);
+ dw_reader(dws);
+ cpu_relax();
+ } while (dws->rx_end > dws->rx);
+
+ return 0;
+}
+
+static int dw_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi, struct spi_transfer *transfer)
+{
+ struct dw_spi *dws = spi_master_get_devdata(master);
+ struct chip_data *chip = spi_get_ctldata(spi);
+ u8 imask = 0;
+ u16 txlevel = 0;
+ u16 clk_div = 0;
+ u32 speed = 0;
+ u32 cr0 = 0;
+ int ret;
+
+ dws->dma_mapped = 0;
+ dws->n_bytes = chip->n_bytes;
+ dws->dma_width = chip->dma_width;
+
+ dws->tx = (void *)transfer->tx_buf;
+ dws->tx_end = dws->tx + transfer->len;
+ dws->rx = transfer->rx_buf;
+ dws->rx_end = dws->rx + transfer->len;
+ dws->len = transfer->len;
+
+ spi_enable_chip(dws, 0);
+
+ cr0 = chip->cr0;
+
+ /* Handle per transfer options for bpw and speed */
+ if (transfer->speed_hz) {
+ speed = chip->speed_hz;
+
+ if ((transfer->speed_hz != speed) || !chip->clk_div) {
+ speed = transfer->speed_hz;
+
+ /*
+ * clk_div doesn't support odd numbers; e.g. at max_freq
+ * 100 MHz, an 11 MHz request yields
+ * clk_div = (9 + 1) & 0xfffe = 10 (10 MHz actual).
+ */
+ clk_div = (dws->max_freq / speed + 1) & 0xfffe;
+
+ chip->speed_hz = speed;
+ chip->clk_div = clk_div;
+
+ spi_set_clk(dws, chip->clk_div);
+ }
+ }
+ if (transfer->bits_per_word) {
+ if (transfer->bits_per_word == 8) {
+ dws->n_bytes = 1;
+ dws->dma_width = 1;
+ } else if (transfer->bits_per_word == 16) {
+ dws->n_bytes = 2;
+ dws->dma_width = 2;
+ }
+ cr0 = (transfer->bits_per_word - 1)
+ | (chip->type << SPI_FRF_OFFSET)
+ | (spi->mode << SPI_MODE_OFFSET)
+ | (chip->tmode << SPI_TMOD_OFFSET);
+ }
+
+ /*
+ * Adjust transfer mode if necessary. Requires platform dependent
+ * chipselect mechanism.
+ */
+ if (chip->cs_control) {
+ if (dws->rx && dws->tx)
+ chip->tmode = SPI_TMOD_TR;
+ else if (dws->rx)
+ chip->tmode = SPI_TMOD_RO;
+ else
+ chip->tmode = SPI_TMOD_TO;
+
+ cr0 &= ~SPI_TMOD_MASK;
+ cr0 |= (chip->tmode << SPI_TMOD_OFFSET);
+ }
+
+ dw_writel(dws, DW_SPI_CTRL0, cr0);
+
+ /* Check if current transfer is a DMA transaction */
+ if (master->can_dma && master->can_dma(master, spi, transfer))
+ dws->dma_mapped = master->cur_msg_mapped;
+
+ /* For poll mode just disable all interrupts */
+ spi_mask_intr(dws, 0xff);
+
+ /*
+ * Interrupt mode
+ * we only need to set the TXEI IRQ, as TX/RX always happen synchronously
+ */
+ if (dws->dma_mapped) {
+ ret = dws->dma_ops->dma_setup(dws, transfer);
+ if (ret < 0) {
+ spi_enable_chip(dws, 1);
+ return ret;
+ }
+ } else if (!chip->poll_mode) {
+ txlevel = min_t(u16, dws->fifo_len / 2, dws->len / dws->n_bytes);
+ dw_writel(dws, DW_SPI_TXFLTR, txlevel);
+
+ /* Set the interrupt mask */
+ imask |= SPI_INT_TXEI | SPI_INT_TXOI |
+ SPI_INT_RXUI | SPI_INT_RXOI;
+ spi_umask_intr(dws, imask);
+
+ dws->transfer_handler = interrupt_transfer;
+ }
+
+ spi_enable_chip(dws, 1);
+
+ if (dws->dma_mapped) {
+ ret = dws->dma_ops->dma_transfer(dws, transfer);
+ if (ret < 0)
+ return ret;
+ }
+
+ if (chip->poll_mode)
+ return poll_transfer(dws);
+
+ return 1;
+}
+
+static void dw_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct dw_spi *dws = spi_master_get_devdata(master);
+
+ if (dws->dma_mapped)
+ dws->dma_ops->dma_stop(dws);
+
+ spi_reset_chip(dws);
+}
+
+/* This may be called twice for each spi dev */
+static int dw_spi_setup(struct spi_device *spi)
+{
+ struct dw_spi_chip *chip_info = NULL;
+ struct chip_data *chip;
+ int ret;
+
+ /* Only alloc on first setup */
+ chip = spi_get_ctldata(spi);
+ if (!chip) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+ spi_set_ctldata(spi, chip);
+ }
+
+ /*
+ * Protocol drivers may change the chip settings, so...
+ * if chip_info exists, use it
+ */
+ chip_info = spi->controller_data;
+
+ /* chip_info doesn't always exist */
+ if (chip_info) {
+ if (chip_info->cs_control)
+ chip->cs_control = chip_info->cs_control;
+
+ chip->poll_mode = chip_info->poll_mode;
+ chip->type = chip_info->type;
+
+ chip->rx_threshold = 0;
+ chip->tx_threshold = 0;
+ }
+
+ if (spi->bits_per_word == 8) {
+ chip->n_bytes = 1;
+ chip->dma_width = 1;
+ } else if (spi->bits_per_word == 16) {
+ chip->n_bytes = 2;
+ chip->dma_width = 2;
+ }
+ chip->bits_per_word = spi->bits_per_word;
+
+ if (!spi->max_speed_hz) {
+ dev_err(&spi->dev, "No max speed HZ parameter\n");
+ return -EINVAL;
+ }
+
+ chip->tmode = 0; /* Tx & Rx */
+ /* Default SPI mode is SCPOL = 0, SCPH = 0 */
+ chip->cr0 = (chip->bits_per_word - 1)
+ | (chip->type << SPI_FRF_OFFSET)
+ | (spi->mode << SPI_MODE_OFFSET)
+ | (chip->tmode << SPI_TMOD_OFFSET);
+
+ if (spi->mode & SPI_LOOP)
+ chip->cr0 |= 1 << SPI_SRL_OFFSET;
+
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ret = gpio_direction_output(spi->cs_gpio,
+ !(spi->mode & SPI_CS_HIGH));
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dw_spi_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ kfree(chip);
+ spi_set_ctldata(spi, NULL);
+}
+
+/* Restart the controller, disable all interrupts, clear the RX FIFO */
+static void spi_hw_init(struct device *dev, struct dw_spi *dws)
+{
+ spi_reset_chip(dws);
+
+	/*
+	 * Try to detect the FIFO depth if not set by the interface driver;
+	 * per the HW spec the depth can be from 2 to 256.
+	 */
+ if (!dws->fifo_len) {
+ u32 fifo;
+
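+		/*
+		 * Clarifying note (added): TXFLTR only accepts thresholds
+		 * below the FIFO depth, so the first value that fails to
+		 * read back reveals the depth.
+		 */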
+ for (fifo = 1; fifo < 256; fifo++) {
+ dw_writel(dws, DW_SPI_TXFLTR, fifo);
+ if (fifo != dw_readl(dws, DW_SPI_TXFLTR))
+ break;
+ }
+ dw_writel(dws, DW_SPI_TXFLTR, 0);
+
+ dws->fifo_len = (fifo == 1) ? 0 : fifo;
+ dev_dbg(dev, "Detected FIFO size: %u bytes\n", dws->fifo_len);
+ }
+}
+
+int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
+{
+ struct spi_master *master;
+ int ret;
+
+ BUG_ON(dws == NULL);
+
+ master = spi_alloc_master(dev, 0);
+ if (!master)
+ return -ENOMEM;
+
+ dws->master = master;
+ dws->type = SSI_MOTO_SPI;
+ dws->dma_inited = 0;
+ dws->dma_addr = (dma_addr_t)(dws->paddr + 0x60);
+ snprintf(dws->name, sizeof(dws->name), "dw_spi%d", dws->bus_num);
+
+ ret = devm_request_irq(dev, dws->irq, dw_spi_irq, IRQF_SHARED,
+ dws->name, master);
+ if (ret < 0) {
+		dev_err(&master->dev, "cannot get IRQ\n");
+ goto err_free_master;
+ }
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->bus_num = dws->bus_num;
+ master->num_chipselect = dws->num_cs;
+ master->setup = dw_spi_setup;
+ master->cleanup = dw_spi_cleanup;
+ master->set_cs = dw_spi_set_cs;
+ master->transfer_one = dw_spi_transfer_one;
+ master->handle_err = dw_spi_handle_err;
+ master->max_speed_hz = dws->max_freq;
+ master->dev.of_node = dev->of_node;
+
+ /* Basic HW init */
+ spi_hw_init(dev, dws);
+
+ if (dws->dma_ops && dws->dma_ops->dma_init) {
+ ret = dws->dma_ops->dma_init(dws);
+ if (ret) {
+ dev_warn(dev, "DMA init failed\n");
+ dws->dma_inited = 0;
+ } else {
+ master->can_dma = dws->dma_ops->can_dma;
+ }
+ }
+
+ spi_master_set_devdata(master, dws);
+ ret = devm_spi_register_master(dev, master);
+ if (ret) {
+ dev_err(&master->dev, "problem registering spi master\n");
+ goto err_dma_exit;
+ }
+
+ dw_spi_debugfs_init(dws);
+ return 0;
+
+err_dma_exit:
+ if (dws->dma_ops && dws->dma_ops->dma_exit)
+ dws->dma_ops->dma_exit(dws);
+ spi_enable_chip(dws, 0);
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_spi_add_host);
+
+void dw_spi_remove_host(struct dw_spi *dws)
+{
+ if (!dws)
+ return;
+ dw_spi_debugfs_remove(dws);
+
+ if (dws->dma_ops && dws->dma_ops->dma_exit)
+ dws->dma_ops->dma_exit(dws);
+ spi_enable_chip(dws, 0);
+ /* Disable clk */
+ spi_set_clk(dws, 0);
+}
+EXPORT_SYMBOL_GPL(dw_spi_remove_host);
+
+int dw_spi_suspend_host(struct dw_spi *dws)
+{
+ int ret = 0;
+
+ ret = spi_master_suspend(dws->master);
+ if (ret)
+ return ret;
+ spi_enable_chip(dws, 0);
+ spi_set_clk(dws, 0);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_spi_suspend_host);
+
+int dw_spi_resume_host(struct dw_spi *dws)
+{
+ int ret;
+
+ spi_hw_init(&dws->master->dev, dws);
+ ret = spi_master_resume(dws->master);
+ if (ret)
+		dev_err(&dws->master->dev, "failed to start queue (%d)\n", ret);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dw_spi_resume_host);
+
+MODULE_AUTHOR("Feng Tang <feng.tang@intel.com>");
+MODULE_DESCRIPTION("Driver for DesignWare SPI controller core");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-dw.h b/drivers/spi/spi-dw.h
new file mode 100644
index 000000000..6c91391c1
--- /dev/null
+++ b/drivers/spi/spi-dw.h
@@ -0,0 +1,212 @@
+#ifndef DW_SPI_HEADER_H
+#define DW_SPI_HEADER_H
+
+#include <linux/io.h>
+#include <linux/scatterlist.h>
+#include <linux/gpio.h>
+
+/* Register offsets */
+#define DW_SPI_CTRL0 0x00
+#define DW_SPI_CTRL1 0x04
+#define DW_SPI_SSIENR 0x08
+#define DW_SPI_MWCR 0x0c
+#define DW_SPI_SER 0x10
+#define DW_SPI_BAUDR 0x14
+#define DW_SPI_TXFLTR 0x18
+#define DW_SPI_RXFLTR 0x1c
+#define DW_SPI_TXFLR 0x20
+#define DW_SPI_RXFLR 0x24
+#define DW_SPI_SR 0x28
+#define DW_SPI_IMR 0x2c
+#define DW_SPI_ISR 0x30
+#define DW_SPI_RISR 0x34
+#define DW_SPI_TXOICR 0x38
+#define DW_SPI_RXOICR 0x3c
+#define DW_SPI_RXUICR 0x40
+#define DW_SPI_MSTICR 0x44
+#define DW_SPI_ICR 0x48
+#define DW_SPI_DMACR 0x4c
+#define DW_SPI_DMATDLR 0x50
+#define DW_SPI_DMARDLR 0x54
+#define DW_SPI_IDR 0x58
+#define DW_SPI_VERSION 0x5c
+#define DW_SPI_DR 0x60
+
+/* Bit fields in CTRLR0 */
+#define SPI_DFS_OFFSET 0
+
+#define SPI_FRF_OFFSET 4
+#define SPI_FRF_SPI 0x0
+#define SPI_FRF_SSP 0x1
+#define SPI_FRF_MICROWIRE 0x2
+#define SPI_FRF_RESV 0x3
+
+#define SPI_MODE_OFFSET 6
+#define SPI_SCPH_OFFSET 6
+#define SPI_SCOL_OFFSET 7
+
+#define SPI_TMOD_OFFSET 8
+#define SPI_TMOD_MASK (0x3 << SPI_TMOD_OFFSET)
+#define SPI_TMOD_TR 0x0 /* xmit & recv */
+#define SPI_TMOD_TO 0x1 /* xmit only */
+#define SPI_TMOD_RO 0x2 /* recv only */
+#define SPI_TMOD_EPROMREAD 0x3 /* eeprom read mode */
+
+#define SPI_SLVOE_OFFSET 10
+#define SPI_SRL_OFFSET 11
+#define SPI_CFS_OFFSET 12
+
+/* Bit fields in SR, 7 bits */
+#define SR_MASK 0x7f /* cover 7 bits */
+#define SR_BUSY (1 << 0)
+#define SR_TF_NOT_FULL (1 << 1)
+#define SR_TF_EMPT (1 << 2)
+#define SR_RF_NOT_EMPT (1 << 3)
+#define SR_RF_FULL (1 << 4)
+#define SR_TX_ERR (1 << 5)
+#define SR_DCOL (1 << 6)
+
+/* Bit fields in ISR, IMR, RISR, 7 bits */
+#define SPI_INT_TXEI (1 << 0)
+#define SPI_INT_TXOI (1 << 1)
+#define SPI_INT_RXUI (1 << 2)
+#define SPI_INT_RXOI (1 << 3)
+#define SPI_INT_RXFI (1 << 4)
+#define SPI_INT_MSTI (1 << 5)
+
+/* Bit fields in DMACR */
+#define SPI_DMA_RDMAE (1 << 0)
+#define SPI_DMA_TDMAE (1 << 1)
+
+/* TX/RX interrupt level threshold; max can be 256 */
+#define SPI_INT_THRESHOLD 32
+
+enum dw_ssi_type {
+ SSI_MOTO_SPI = 0,
+ SSI_TI_SSP,
+ SSI_NS_MICROWIRE,
+};
+
+struct dw_spi;
+struct dw_spi_dma_ops {
+ int (*dma_init)(struct dw_spi *dws);
+ void (*dma_exit)(struct dw_spi *dws);
+ int (*dma_setup)(struct dw_spi *dws, struct spi_transfer *xfer);
+ bool (*can_dma)(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer);
+ int (*dma_transfer)(struct dw_spi *dws, struct spi_transfer *xfer);
+ void (*dma_stop)(struct dw_spi *dws);
+};
+
+struct dw_spi {
+ struct spi_master *master;
+ enum dw_ssi_type type;
+ char name[16];
+
+ void __iomem *regs;
+ unsigned long paddr;
+ int irq;
+ u32 fifo_len; /* depth of the FIFO buffer */
+ u32 max_freq; /* max bus freq supported */
+
+ u16 bus_num;
+	u16 num_cs; /* number of chip selects supported */
+
+ /* Current message transfer state info */
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ int dma_mapped;
+	u8 n_bytes; /* current transfer uses 1 or 2 bytes per word */
+ u32 dma_width;
+ irqreturn_t (*transfer_handler)(struct dw_spi *dws);
+
+ /* DMA info */
+ int dma_inited;
+ struct dma_chan *txchan;
+ struct dma_chan *rxchan;
+ unsigned long dma_chan_busy;
+	dma_addr_t dma_addr; /* physical address of the data register */
+ struct dw_spi_dma_ops *dma_ops;
+ void *dma_tx;
+ void *dma_rx;
+
+ /* Bus interface info */
+ void *priv;
+#ifdef CONFIG_DEBUG_FS
+ struct dentry *debugfs;
+#endif
+};
+
+static inline u32 dw_readl(struct dw_spi *dws, u32 offset)
+{
+ return __raw_readl(dws->regs + offset);
+}
+
+static inline void dw_writel(struct dw_spi *dws, u32 offset, u32 val)
+{
+ __raw_writel(val, dws->regs + offset);
+}
+
+static inline void spi_enable_chip(struct dw_spi *dws, int enable)
+{
+ dw_writel(dws, DW_SPI_SSIENR, (enable ? 1 : 0));
+}
+
+static inline void spi_set_clk(struct dw_spi *dws, u16 div)
+{
+ dw_writel(dws, DW_SPI_BAUDR, div);
+}
+
+/* Disable IRQ bits */
+static inline void spi_mask_intr(struct dw_spi *dws, u32 mask)
+{
+ u32 new_mask;
+
+ new_mask = dw_readl(dws, DW_SPI_IMR) & ~mask;
+ dw_writel(dws, DW_SPI_IMR, new_mask);
+}
+
+/* Enable IRQ bits */
+static inline void spi_umask_intr(struct dw_spi *dws, u32 mask)
+{
+ u32 new_mask;
+
+ new_mask = dw_readl(dws, DW_SPI_IMR) | mask;
+ dw_writel(dws, DW_SPI_IMR, new_mask);
+}
+
+/*
+ * This disables the SPI controller, masks all interrupts, and re-enables
+ * the controller. The transmit and receive FIFOs are cleared when the
+ * device is disabled.
+ */
+static inline void spi_reset_chip(struct dw_spi *dws)
+{
+ spi_enable_chip(dws, 0);
+ spi_mask_intr(dws, 0xff);
+ spi_enable_chip(dws, 1);
+}
+
+/*
+ * Each SPI slave device working with the dw_spi controller should
+ * have such a structure claiming its working mode (poll or PIO/DMA),
+ * which can be saved in the "controller_data" member of
+ * struct spi_device.
+ */
+struct dw_spi_chip {
+ u8 poll_mode; /* 1 for controller polling mode */
+ u8 type; /* SPI/SSP/MicroWire */
+ void (*cs_control)(u32 command);
+};
+
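+/*
+ * Illustrative usage sketch (added; the device names are assumptions,
+ * not part of this header): a board file could pass these per-device
+ * settings via controller_data, e.g.:
+ *
+ *	static struct dw_spi_chip board_chip = {
+ *		.poll_mode = 1,
+ *		.type = SSI_MOTO_SPI,
+ *	};
+ *	static struct spi_board_info board_info = {
+ *		.modalias = "spidev",
+ *		.controller_data = &board_chip,
+ *	};
+ */
+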
+extern int dw_spi_add_host(struct device *dev, struct dw_spi *dws);
+extern void dw_spi_remove_host(struct dw_spi *dws);
+extern int dw_spi_suspend_host(struct dw_spi *dws);
+extern int dw_spi_resume_host(struct dw_spi *dws);
+
+/* platform related setup */
+extern int dw_spi_mid_init(struct dw_spi *dws); /* Intel MID platforms */
+#endif /* DW_SPI_HEADER_H */
diff --git a/drivers/spi/spi-efm32.c b/drivers/spi/spi-efm32.c
new file mode 100644
index 000000000..065fe8744
--- /dev/null
+++ b/drivers/spi/spi-efm32.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (C) 2012-2013 Uwe Kleine-Koenig for Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it under
+ * the terms of the GNU General Public License version 2 as published by the
+ * Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_data/efm32-spi.h>
+
+#define DRIVER_NAME "efm32-spi"
+
+#define MASK_VAL(mask, val) ((val << __ffs(mask)) & mask)
+
+#define REG_CTRL 0x00
+#define REG_CTRL_SYNC 0x0001
+#define REG_CTRL_CLKPOL 0x0100
+#define REG_CTRL_CLKPHA 0x0200
+#define REG_CTRL_MSBF 0x0400
+#define REG_CTRL_TXBIL 0x1000
+
+#define REG_FRAME 0x04
+#define REG_FRAME_DATABITS__MASK 0x000f
+#define REG_FRAME_DATABITS(n) ((n) - 3)
+
+#define REG_CMD 0x0c
+#define REG_CMD_RXEN 0x0001
+#define REG_CMD_RXDIS 0x0002
+#define REG_CMD_TXEN 0x0004
+#define REG_CMD_TXDIS 0x0008
+#define REG_CMD_MASTEREN 0x0010
+
+#define REG_STATUS 0x10
+#define REG_STATUS_TXENS 0x0002
+#define REG_STATUS_TXC 0x0020
+#define REG_STATUS_TXBL 0x0040
+#define REG_STATUS_RXDATAV 0x0080
+
+#define REG_CLKDIV 0x14
+
+#define REG_RXDATAX 0x18
+#define REG_RXDATAX_RXDATA__MASK 0x01ff
+#define REG_RXDATAX_PERR 0x4000
+#define REG_RXDATAX_FERR 0x8000
+
+#define REG_TXDATA 0x34
+
+#define REG_IF 0x40
+#define REG_IF_TXBL 0x0002
+#define REG_IF_RXDATAV 0x0004
+
+#define REG_IFS 0x44
+#define REG_IFC 0x48
+#define REG_IEN 0x4c
+
+#define REG_ROUTE 0x54
+#define REG_ROUTE_RXPEN 0x0001
+#define REG_ROUTE_TXPEN 0x0002
+#define REG_ROUTE_CLKPEN 0x0008
+#define REG_ROUTE_LOCATION__MASK 0x0700
+#define REG_ROUTE_LOCATION(n) MASK_VAL(REG_ROUTE_LOCATION__MASK, (n))
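+/* e.g. (illustrative): REG_ROUTE_LOCATION(2) = (2 << 8) & 0x0700 = 0x0200 */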
+
+struct efm32_spi_ddata {
+ struct spi_bitbang bitbang;
+
+ spinlock_t lock;
+
+ struct clk *clk;
+ void __iomem *base;
+ unsigned int rxirq, txirq;
+ struct efm32_spi_pdata pdata;
+
+ /* irq data */
+ struct completion done;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ unsigned tx_len, rx_len;
+
+ /* chip selects */
+ unsigned csgpio[];
+};
+
+#define ddata_to_dev(ddata) (&(ddata->bitbang.master->dev))
+#define efm32_spi_vdbg(ddata, format, arg...) \
+ dev_vdbg(ddata_to_dev(ddata), format, ##arg)
+
+static void efm32_spi_write32(struct efm32_spi_ddata *ddata,
+ u32 value, unsigned offset)
+{
+ writel_relaxed(value, ddata->base + offset);
+}
+
+static u32 efm32_spi_read32(struct efm32_spi_ddata *ddata, unsigned offset)
+{
+ return readl_relaxed(ddata->base + offset);
+}
+
+static void efm32_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
+ int value = !(spi->mode & SPI_CS_HIGH) == !(is_on == BITBANG_CS_ACTIVE);
+
+ gpio_set_value(ddata->csgpio[spi->chip_select], value);
+}
+
+static int efm32_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
+
+ unsigned bpw = t->bits_per_word ?: spi->bits_per_word;
+ unsigned speed = t->speed_hz ?: spi->max_speed_hz;
+ unsigned long clkfreq = clk_get_rate(ddata->clk);
+ u32 clkdiv;
+
+ efm32_spi_write32(ddata, REG_CTRL_SYNC | REG_CTRL_MSBF |
+ (spi->mode & SPI_CPHA ? REG_CTRL_CLKPHA : 0) |
+ (spi->mode & SPI_CPOL ? REG_CTRL_CLKPOL : 0), REG_CTRL);
+
+ efm32_spi_write32(ddata,
+ REG_FRAME_DATABITS(bpw), REG_FRAME);
+
+ if (2 * speed >= clkfreq)
+ clkdiv = 0;
+ else
+ clkdiv = 64 * (DIV_ROUND_UP(2 * clkfreq, speed) - 4);
+
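+	/*
+	 * Illustrative example (added; values assumed): with a 48 MHz
+	 * peripheral clock and a requested 1 MHz SPI clock, clkdiv =
+	 * 64 * (DIV_ROUND_UP(96000000, 1000000) - 4) = 64 * 92 = 5888,
+	 * well below the 2^21 limit checked below.
+	 */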
+ if (clkdiv > (1U << 21))
+ return -EINVAL;
+
+ efm32_spi_write32(ddata, clkdiv, REG_CLKDIV);
+ efm32_spi_write32(ddata, REG_CMD_MASTEREN, REG_CMD);
+ efm32_spi_write32(ddata, REG_CMD_RXEN | REG_CMD_TXEN, REG_CMD);
+
+ return 0;
+}
+
+static void efm32_spi_tx_u8(struct efm32_spi_ddata *ddata)
+{
+ u8 val = 0;
+
+ if (ddata->tx_buf) {
+ val = *ddata->tx_buf;
+ ddata->tx_buf++;
+ }
+
+ ddata->tx_len--;
+ efm32_spi_write32(ddata, val, REG_TXDATA);
+ efm32_spi_vdbg(ddata, "%s: tx 0x%x\n", __func__, val);
+}
+
+static void efm32_spi_rx_u8(struct efm32_spi_ddata *ddata)
+{
+ u32 rxdata = efm32_spi_read32(ddata, REG_RXDATAX);
+ efm32_spi_vdbg(ddata, "%s: rx 0x%x\n", __func__, rxdata);
+
+ if (ddata->rx_buf) {
+ *ddata->rx_buf = rxdata;
+ ddata->rx_buf++;
+ }
+
+ ddata->rx_len--;
+}
+
+static void efm32_spi_filltx(struct efm32_spi_ddata *ddata)
+{
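+	/*
+	 * Clarifying note (added): the "+ 2" margin keeps transmission at
+	 * most two frames ahead of reception, which we take to match the
+	 * receiver's small buffer (an inference, not from the original).
+	 */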
+ while (ddata->tx_len &&
+ ddata->tx_len + 2 > ddata->rx_len &&
+ efm32_spi_read32(ddata, REG_STATUS) & REG_STATUS_TXBL) {
+ efm32_spi_tx_u8(ddata);
+ }
+}
+
+static int efm32_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(spi->master);
+ int ret = -EBUSY;
+
+ spin_lock_irq(&ddata->lock);
+
+ if (ddata->tx_buf || ddata->rx_buf)
+ goto out_unlock;
+
+ ddata->tx_buf = t->tx_buf;
+ ddata->rx_buf = t->rx_buf;
+ ddata->tx_len = ddata->rx_len =
+ t->len * DIV_ROUND_UP(t->bits_per_word, 8);
+
+ efm32_spi_filltx(ddata);
+
+ reinit_completion(&ddata->done);
+
+ efm32_spi_write32(ddata, REG_IF_TXBL | REG_IF_RXDATAV, REG_IEN);
+
+ spin_unlock_irq(&ddata->lock);
+
+ wait_for_completion(&ddata->done);
+
+ spin_lock_irq(&ddata->lock);
+
+ ret = t->len - max(ddata->tx_len, ddata->rx_len);
+
+ efm32_spi_write32(ddata, 0, REG_IEN);
+ ddata->tx_buf = ddata->rx_buf = NULL;
+
+out_unlock:
+ spin_unlock_irq(&ddata->lock);
+
+ return ret;
+}
+
+static irqreturn_t efm32_spi_rxirq(int irq, void *data)
+{
+ struct efm32_spi_ddata *ddata = data;
+ irqreturn_t ret = IRQ_NONE;
+
+ spin_lock(&ddata->lock);
+
+ while (ddata->rx_len > 0 &&
+ efm32_spi_read32(ddata, REG_STATUS) &
+ REG_STATUS_RXDATAV) {
+ efm32_spi_rx_u8(ddata);
+
+ ret = IRQ_HANDLED;
+ }
+
+ if (!ddata->rx_len) {
+ u32 ien = efm32_spi_read32(ddata, REG_IEN);
+
+ ien &= ~REG_IF_RXDATAV;
+
+ efm32_spi_write32(ddata, ien, REG_IEN);
+
+ complete(&ddata->done);
+ }
+
+ spin_unlock(&ddata->lock);
+
+ return ret;
+}
+
+static irqreturn_t efm32_spi_txirq(int irq, void *data)
+{
+ struct efm32_spi_ddata *ddata = data;
+
+ efm32_spi_vdbg(ddata,
+ "%s: txlen = %u, rxlen = %u, if=0x%08x, stat=0x%08x\n",
+ __func__, ddata->tx_len, ddata->rx_len,
+ efm32_spi_read32(ddata, REG_IF),
+ efm32_spi_read32(ddata, REG_STATUS));
+
+ spin_lock(&ddata->lock);
+
+ efm32_spi_filltx(ddata);
+
+ efm32_spi_vdbg(ddata, "%s: txlen = %u, rxlen = %u\n",
+ __func__, ddata->tx_len, ddata->rx_len);
+
+ if (!ddata->tx_len) {
+ u32 ien = efm32_spi_read32(ddata, REG_IEN);
+
+ ien &= ~REG_IF_TXBL;
+
+ efm32_spi_write32(ddata, ien, REG_IEN);
+ efm32_spi_vdbg(ddata, "disable TXBL\n");
+ }
+
+ spin_unlock(&ddata->lock);
+
+ return IRQ_HANDLED;
+}
+
+static u32 efm32_spi_get_configured_location(struct efm32_spi_ddata *ddata)
+{
+ u32 reg = efm32_spi_read32(ddata, REG_ROUTE);
+
+ return (reg & REG_ROUTE_LOCATION__MASK) >> __ffs(REG_ROUTE_LOCATION__MASK);
+}
+
+static void efm32_spi_probe_dt(struct platform_device *pdev,
+ struct spi_master *master, struct efm32_spi_ddata *ddata)
+{
+ struct device_node *np = pdev->dev.of_node;
+ u32 location;
+ int ret;
+
+ ret = of_property_read_u32(np, "energymicro,location", &location);
+
+ if (ret)
+ /* fall back to wrongly namespaced property */
+ ret = of_property_read_u32(np, "efm32,location", &location);
+
+ if (ret)
+ /* fall back to old and (wrongly) generic property "location" */
+ ret = of_property_read_u32(np, "location", &location);
+
+ if (!ret) {
+ dev_dbg(&pdev->dev, "using location %u\n", location);
+ } else {
+ /* default to location configured in hardware */
+ location = efm32_spi_get_configured_location(ddata);
+
+ dev_info(&pdev->dev, "fall back to location %u\n", location);
+ }
+
+ ddata->pdata.location = location;
+}
+
+static int efm32_spi_probe(struct platform_device *pdev)
+{
+ struct efm32_spi_ddata *ddata;
+ struct resource *res;
+ int ret;
+ struct spi_master *master;
+ struct device_node *np = pdev->dev.of_node;
+ int num_cs, i;
+
+ if (!np)
+ return -EINVAL;
+
+ num_cs = of_gpio_named_count(np, "cs-gpios");
+ if (num_cs < 0)
+ return num_cs;
+
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(*ddata) + num_cs * sizeof(unsigned));
+ if (!master) {
+ dev_dbg(&pdev->dev,
+ "failed to allocate spi master controller\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+
+ master->dev.of_node = pdev->dev.of_node;
+
+ master->num_chipselect = num_cs;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+
+ ddata = spi_master_get_devdata(master);
+
+ ddata->bitbang.master = master;
+ ddata->bitbang.chipselect = efm32_spi_chipselect;
+ ddata->bitbang.setup_transfer = efm32_spi_setup_transfer;
+ ddata->bitbang.txrx_bufs = efm32_spi_txrx_bufs;
+
+ spin_lock_init(&ddata->lock);
+ init_completion(&ddata->done);
+
+ ddata->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(ddata->clk)) {
+ ret = PTR_ERR(ddata->clk);
+ dev_err(&pdev->dev, "failed to get clock: %d\n", ret);
+ goto err;
+ }
+
+ for (i = 0; i < num_cs; ++i) {
+ ret = of_get_named_gpio(np, "cs-gpios", i);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get csgpio#%u (%d)\n",
+ i, ret);
+ goto err;
+ }
+ ddata->csgpio[i] = ret;
+ dev_dbg(&pdev->dev, "csgpio#%u = %u\n", i, ddata->csgpio[i]);
+ ret = devm_gpio_request_one(&pdev->dev, ddata->csgpio[i],
+ GPIOF_OUT_INIT_LOW, DRIVER_NAME);
+ if (ret < 0) {
+ dev_err(&pdev->dev,
+ "failed to configure csgpio#%u (%d)\n",
+ i, ret);
+ goto err;
+ }
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ ret = -ENODEV;
+ dev_err(&pdev->dev, "failed to determine base address\n");
+ goto err;
+ }
+
+ if (resource_size(res) < 0x60) {
+ ret = -EINVAL;
+ dev_err(&pdev->dev, "memory resource too small\n");
+ goto err;
+ }
+
+ ddata->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ddata->base)) {
+ ret = PTR_ERR(ddata->base);
+ goto err;
+ }
+
+ ret = platform_get_irq(pdev, 0);
+ if (ret <= 0) {
+ dev_err(&pdev->dev, "failed to get rx irq (%d)\n", ret);
+ goto err;
+ }
+
+ ddata->rxirq = ret;
+
+ ret = platform_get_irq(pdev, 1);
+ if (ret <= 0)
+ ret = ddata->rxirq + 1;
+
+ ddata->txirq = ret;
+
+ ret = clk_prepare_enable(ddata->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to enable clock (%d)\n", ret);
+ goto err;
+ }
+
+ efm32_spi_probe_dt(pdev, master, ddata);
+
+ efm32_spi_write32(ddata, 0, REG_IEN);
+ efm32_spi_write32(ddata, REG_ROUTE_TXPEN | REG_ROUTE_RXPEN |
+ REG_ROUTE_CLKPEN |
+ REG_ROUTE_LOCATION(ddata->pdata.location), REG_ROUTE);
+
+ ret = request_irq(ddata->rxirq, efm32_spi_rxirq,
+ 0, DRIVER_NAME " rx", ddata);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register rxirq (%d)\n", ret);
+ goto err_disable_clk;
+ }
+
+ ret = request_irq(ddata->txirq, efm32_spi_txirq,
+ 0, DRIVER_NAME " tx", ddata);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to register txirq (%d)\n", ret);
+ goto err_free_rx_irq;
+ }
+
+ ret = spi_bitbang_start(&ddata->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_bitbang_start failed (%d)\n", ret);
+
+ free_irq(ddata->txirq, ddata);
+err_free_rx_irq:
+ free_irq(ddata->rxirq, ddata);
+err_disable_clk:
+ clk_disable_unprepare(ddata->clk);
+err:
+ spi_master_put(master);
+ }
+
+ return ret;
+}
+
+static int efm32_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct efm32_spi_ddata *ddata = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&ddata->bitbang);
+
+ efm32_spi_write32(ddata, 0, REG_IEN);
+
+ free_irq(ddata->txirq, ddata);
+ free_irq(ddata->rxirq, ddata);
+ clk_disable_unprepare(ddata->clk);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static const struct of_device_id efm32_spi_dt_ids[] = {
+ {
+ .compatible = "energymicro,efm32-spi",
+ }, {
+ /* doesn't follow the "vendor,device" scheme, don't use */
+ .compatible = "efm32,spi",
+ }, {
+ /* sentinel */
+ }
+};
+MODULE_DEVICE_TABLE(of, efm32_spi_dt_ids);
+
+static struct platform_driver efm32_spi_driver = {
+ .probe = efm32_spi_probe,
+ .remove = efm32_spi_remove,
+
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = efm32_spi_dt_ids,
+ },
+};
+module_platform_driver(efm32_spi_driver);
+
+MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
+MODULE_DESCRIPTION("EFM32 SPI driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-ep93xx.c b/drivers/spi/spi-ep93xx.c
new file mode 100644
index 000000000..bb00be8d1
--- /dev/null
+++ b/drivers/spi/spi-ep93xx.c
@@ -0,0 +1,976 @@
+/*
+ * Driver for Cirrus Logic EP93xx SPI controller.
+ *
+ * Copyright (C) 2010-2011 Mika Westerberg
+ *
+ * Explicit FIFO handling code was inspired by amba-pl022 driver.
+ *
+ * Chip select support using other than built-in GPIOs by H. Hartley Sweeten.
+ *
+ * For more information about the SPI controller see documentation on Cirrus
+ * Logic web site:
+ * http://www.cirrus.com/en/pubs/manual/EP93xx_Users_Guide_UM1.pdf
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dmaengine.h>
+#include <linux/bitops.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/scatterlist.h>
+#include <linux/spi/spi.h>
+
+#include <linux/platform_data/dma-ep93xx.h>
+#include <linux/platform_data/spi-ep93xx.h>
+
+#define SSPCR0 0x0000
+#define SSPCR0_MODE_SHIFT 6
+#define SSPCR0_SCR_SHIFT 8
+
+#define SSPCR1 0x0004
+#define SSPCR1_RIE BIT(0)
+#define SSPCR1_TIE BIT(1)
+#define SSPCR1_RORIE BIT(2)
+#define SSPCR1_LBM BIT(3)
+#define SSPCR1_SSE BIT(4)
+#define SSPCR1_MS BIT(5)
+#define SSPCR1_SOD BIT(6)
+
+#define SSPDR 0x0008
+
+#define SSPSR 0x000c
+#define SSPSR_TFE BIT(0)
+#define SSPSR_TNF BIT(1)
+#define SSPSR_RNE BIT(2)
+#define SSPSR_RFF BIT(3)
+#define SSPSR_BSY BIT(4)
+#define SSPCPSR 0x0010
+
+#define SSPIIR 0x0014
+#define SSPIIR_RIS BIT(0)
+#define SSPIIR_TIS BIT(1)
+#define SSPIIR_RORIS BIT(2)
+#define SSPICR SSPIIR
+
+/* timeout in milliseconds */
+#define SPI_TIMEOUT 5
+/* maximum depth of RX/TX FIFO */
+#define SPI_FIFO_SIZE 8
+
+/**
+ * struct ep93xx_spi - EP93xx SPI controller structure
+ * @pdev: pointer to platform device
+ * @clk: clock for the controller
+ * @regs_base: pointer to ioremap()'d registers
+ * @sspdr_phys: physical address of the SSPDR register
+ * @wait: wait here until given transfer is completed
+ * @current_msg: message that is currently processed (or %NULL if none)
+ * @tx: current byte in transfer to transmit
+ * @rx: current byte in transfer to receive
+ * @fifo_level: how full the FIFO is (%0..%SPI_FIFO_SIZE - %1). Receiving one
+ * frame decreases this level and sending one frame increases it.
+ * @dma_rx: RX DMA channel
+ * @dma_tx: TX DMA channel
+ * @dma_rx_data: RX parameters passed to the DMA engine
+ * @dma_tx_data: TX parameters passed to the DMA engine
+ * @rx_sgt: sg table for RX transfers
+ * @tx_sgt: sg table for TX transfers
+ * @zeropage: dummy page used as RX buffer when only TX buffer is passed in by
+ * the client
+ */
+struct ep93xx_spi {
+ const struct platform_device *pdev;
+ struct clk *clk;
+ void __iomem *regs_base;
+ unsigned long sspdr_phys;
+ struct completion wait;
+ struct spi_message *current_msg;
+ size_t tx;
+ size_t rx;
+ size_t fifo_level;
+ struct dma_chan *dma_rx;
+ struct dma_chan *dma_tx;
+ struct ep93xx_dma_data dma_rx_data;
+ struct ep93xx_dma_data dma_tx_data;
+ struct sg_table rx_sgt;
+ struct sg_table tx_sgt;
+ void *zeropage;
+};
+
+/**
+ * struct ep93xx_spi_chip - SPI device hardware settings
+ * @spi: back pointer to the SPI device
+ * @ops: private chip operations
+ */
+struct ep93xx_spi_chip {
+ const struct spi_device *spi;
+ struct ep93xx_spi_chip_ops *ops;
+};
+
+/* converts bits per word to CR0.DSS value */
+#define bits_per_word_to_dss(bpw) ((bpw) - 1)
+
+static void ep93xx_spi_write_u8(const struct ep93xx_spi *espi,
+ u16 reg, u8 value)
+{
+ writeb(value, espi->regs_base + reg);
+}
+
+static u8 ep93xx_spi_read_u8(const struct ep93xx_spi *spi, u16 reg)
+{
+ return readb(spi->regs_base + reg);
+}
+
+static void ep93xx_spi_write_u16(const struct ep93xx_spi *espi,
+ u16 reg, u16 value)
+{
+ writew(value, espi->regs_base + reg);
+}
+
+static u16 ep93xx_spi_read_u16(const struct ep93xx_spi *spi, u16 reg)
+{
+ return readw(spi->regs_base + reg);
+}
+
+static int ep93xx_spi_enable(const struct ep93xx_spi *espi)
+{
+ u8 regval;
+ int err;
+
+ err = clk_enable(espi->clk);
+ if (err)
+ return err;
+
+ regval = ep93xx_spi_read_u8(espi, SSPCR1);
+ regval |= SSPCR1_SSE;
+ ep93xx_spi_write_u8(espi, SSPCR1, regval);
+
+ return 0;
+}
+
+static void ep93xx_spi_disable(const struct ep93xx_spi *espi)
+{
+ u8 regval;
+
+ regval = ep93xx_spi_read_u8(espi, SSPCR1);
+ regval &= ~SSPCR1_SSE;
+ ep93xx_spi_write_u8(espi, SSPCR1, regval);
+
+ clk_disable(espi->clk);
+}
+
+static void ep93xx_spi_enable_interrupts(const struct ep93xx_spi *espi)
+{
+ u8 regval;
+
+ regval = ep93xx_spi_read_u8(espi, SSPCR1);
+ regval |= (SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
+ ep93xx_spi_write_u8(espi, SSPCR1, regval);
+}
+
+static void ep93xx_spi_disable_interrupts(const struct ep93xx_spi *espi)
+{
+ u8 regval;
+
+ regval = ep93xx_spi_read_u8(espi, SSPCR1);
+ regval &= ~(SSPCR1_RORIE | SSPCR1_TIE | SSPCR1_RIE);
+ ep93xx_spi_write_u8(espi, SSPCR1, regval);
+}
+
+/**
+ * ep93xx_spi_calc_divisors() - calculates SPI clock divisors
+ * @espi: ep93xx SPI controller struct
+ * @rate: desired SPI output clock rate
+ * @div_cpsr: pointer to return the cpsr (pre-scaler) divider
+ * @div_scr: pointer to return the scr divider
+ */
+static int ep93xx_spi_calc_divisors(const struct ep93xx_spi *espi,
+ u32 rate, u8 *div_cpsr, u8 *div_scr)
+{
+ struct spi_master *master = platform_get_drvdata(espi->pdev);
+ unsigned long spi_clk_rate = clk_get_rate(espi->clk);
+ int cpsr, scr;
+
+	/*
+	 * Make sure that the rate is between the values supported by the
+	 * controller. Note that the minimum value is already checked in
+	 * ep93xx_spi_transfer_one_message().
+	 */
+ rate = clamp(rate, master->min_speed_hz, master->max_speed_hz);
+
+	/*
+	 * Calculate divisors so that we can get the speed according to the
+	 * following formula:
+	 *	rate = spi_clock_rate / (cpsr * (1 + scr))
+	 *
+	 * cpsr must be an even number starting from 2; scr can be any number
+	 * between 0 and 255.
+	 */
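+	/*
+	 * Worked example (added; values assumed): with spi_clk_rate = 50 MHz
+	 * and rate = 10 MHz, cpsr = 2 is tried first: scr = 0 gives 25 MHz,
+	 * scr = 1 gives 12.5 MHz, and scr = 2 gives 50 / (2 * 3) ~= 8.33 MHz,
+	 * the first rate not above the request, so cpsr = 2, scr = 2 is used.
+	 */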
+ for (cpsr = 2; cpsr <= 254; cpsr += 2) {
+ for (scr = 0; scr <= 255; scr++) {
+ if ((spi_clk_rate / (cpsr * (scr + 1))) <= rate) {
+ *div_scr = (u8)scr;
+ *div_cpsr = (u8)cpsr;
+ return 0;
+ }
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void ep93xx_spi_cs_control(struct spi_device *spi, bool control)
+{
+ struct ep93xx_spi_chip *chip = spi_get_ctldata(spi);
+ int value = (spi->mode & SPI_CS_HIGH) ? control : !control;
+
+ if (chip->ops && chip->ops->cs_control)
+ chip->ops->cs_control(spi, value);
+}
+
+/**
+ * ep93xx_spi_setup() - setup an SPI device
+ * @spi: SPI device to setup
+ *
+ * This function sets up SPI device mode, speed etc. Can be called multiple
+ * times for a single device. Returns %0 in case of success, negative error in
+ * case of failure. When this function returns success, the device is
+ * deselected.
+ */
+static int ep93xx_spi_setup(struct spi_device *spi)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(spi->master);
+ struct ep93xx_spi_chip *chip;
+
+ chip = spi_get_ctldata(spi);
+ if (!chip) {
+ dev_dbg(&espi->pdev->dev, "initial setup for %s\n",
+ spi->modalias);
+
+ chip = kzalloc(sizeof(*chip), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ chip->spi = spi;
+ chip->ops = spi->controller_data;
+
+ if (chip->ops && chip->ops->setup) {
+ int ret = chip->ops->setup(spi);
+
+ if (ret) {
+ kfree(chip);
+ return ret;
+ }
+ }
+
+ spi_set_ctldata(spi, chip);
+ }
+
+ ep93xx_spi_cs_control(spi, false);
+ return 0;
+}
+
+/**
+ * ep93xx_spi_cleanup() - cleans up master controller specific state
+ * @spi: SPI device to cleanup
+ *
+ * This function releases master controller specific state for the given
+ * @spi device.
+ */
+static void ep93xx_spi_cleanup(struct spi_device *spi)
+{
+ struct ep93xx_spi_chip *chip;
+
+ chip = spi_get_ctldata(spi);
+ if (chip) {
+ if (chip->ops && chip->ops->cleanup)
+ chip->ops->cleanup(spi);
+ spi_set_ctldata(spi, NULL);
+ kfree(chip);
+ }
+}
+
+/**
+ * ep93xx_spi_chip_setup() - configures hardware according to given @chip
+ * @espi: ep93xx SPI controller struct
+ * @chip: chip specific settings
+ * @speed_hz: transfer speed
+ * @bits_per_word: transfer bits_per_word
+ */
+static int ep93xx_spi_chip_setup(const struct ep93xx_spi *espi,
+ const struct ep93xx_spi_chip *chip,
+ u32 speed_hz, u8 bits_per_word)
+{
+ u8 dss = bits_per_word_to_dss(bits_per_word);
+ u8 div_cpsr = 0;
+ u8 div_scr = 0;
+ u16 cr0;
+ int err;
+
+ err = ep93xx_spi_calc_divisors(espi, speed_hz, &div_cpsr, &div_scr);
+ if (err)
+ return err;
+
+ cr0 = div_scr << SSPCR0_SCR_SHIFT;
+ cr0 |= (chip->spi->mode & (SPI_CPHA|SPI_CPOL)) << SSPCR0_MODE_SHIFT;
+ cr0 |= dss;
+
+ dev_dbg(&espi->pdev->dev, "setup: mode %d, cpsr %d, scr %d, dss %d\n",
+ chip->spi->mode, div_cpsr, div_scr, dss);
+ dev_dbg(&espi->pdev->dev, "setup: cr0 %#x\n", cr0);
+
+ ep93xx_spi_write_u8(espi, SSPCPSR, div_cpsr);
+ ep93xx_spi_write_u16(espi, SSPCR0, cr0);
+
+ return 0;
+}
+
+static void ep93xx_do_write(struct ep93xx_spi *espi, struct spi_transfer *t)
+{
+ if (t->bits_per_word > 8) {
+ u16 tx_val = 0;
+
+ if (t->tx_buf)
+ tx_val = ((u16 *)t->tx_buf)[espi->tx];
+ ep93xx_spi_write_u16(espi, SSPDR, tx_val);
+ espi->tx += sizeof(tx_val);
+ } else {
+ u8 tx_val = 0;
+
+ if (t->tx_buf)
+ tx_val = ((u8 *)t->tx_buf)[espi->tx];
+ ep93xx_spi_write_u8(espi, SSPDR, tx_val);
+ espi->tx += sizeof(tx_val);
+ }
+}
+
+static void ep93xx_do_read(struct ep93xx_spi *espi, struct spi_transfer *t)
+{
+ if (t->bits_per_word > 8) {
+ u16 rx_val;
+
+ rx_val = ep93xx_spi_read_u16(espi, SSPDR);
+ if (t->rx_buf)
+ ((u16 *)t->rx_buf)[espi->rx] = rx_val;
+ espi->rx += sizeof(rx_val);
+ } else {
+ u8 rx_val;
+
+ rx_val = ep93xx_spi_read_u8(espi, SSPDR);
+ if (t->rx_buf)
+ ((u8 *)t->rx_buf)[espi->rx] = rx_val;
+ espi->rx += sizeof(rx_val);
+ }
+}
+
+/**
+ * ep93xx_spi_read_write() - perform next RX/TX transfer
+ * @espi: ep93xx SPI controller struct
+ *
+ * This function transfers the next bytes (or half-words) to/from the RX/TX
+ * FIFOs. If called several times, the whole transfer will be completed.
+ * Returns %-EINPROGRESS if the current transfer is not yet complete,
+ * otherwise %0.
+ *
+ * When this function returns, the RX FIFO should be empty and the TX FIFO
+ * should be full.
+ */
+static int ep93xx_spi_read_write(struct ep93xx_spi *espi)
+{
+ struct spi_message *msg = espi->current_msg;
+ struct spi_transfer *t = msg->state;
+
+ /* read as long as RX FIFO has frames in it */
+ while ((ep93xx_spi_read_u8(espi, SSPSR) & SSPSR_RNE)) {
+ ep93xx_do_read(espi, t);
+ espi->fifo_level--;
+ }
+
+ /* write as long as TX FIFO has room */
+ while (espi->fifo_level < SPI_FIFO_SIZE && espi->tx < t->len) {
+ ep93xx_do_write(espi, t);
+ espi->fifo_level++;
+ }
+
+ if (espi->rx == t->len)
+ return 0;
+
+ return -EINPROGRESS;
+}
+
+static void ep93xx_spi_pio_transfer(struct ep93xx_spi *espi)
+{
+ /*
+ * Now everything is set up for the current transfer. We prime the TX
+ * FIFO, enable interrupts, and wait for the transfer to complete.
+ */
+ if (ep93xx_spi_read_write(espi)) {
+ ep93xx_spi_enable_interrupts(espi);
+ wait_for_completion(&espi->wait);
+ }
+}
+
+/**
+ * ep93xx_spi_dma_prepare() - prepares a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * Function configures the DMA, maps the buffer and prepares the DMA
+ * descriptor. Returns a valid DMA descriptor in case of success and ERR_PTR
+ * in case of failure.
+ */
+static struct dma_async_tx_descriptor *
+ep93xx_spi_dma_prepare(struct ep93xx_spi *espi, enum dma_transfer_direction dir)
+{
+ struct spi_transfer *t = espi->current_msg->state;
+ struct dma_async_tx_descriptor *txd;
+ enum dma_slave_buswidth buswidth;
+ struct dma_slave_config conf;
+ struct scatterlist *sg;
+ struct sg_table *sgt;
+ struct dma_chan *chan;
+ const void *buf, *pbuf;
+ size_t len = t->len;
+ int i, ret, nents;
+
+ if (t->bits_per_word > 8)
+ buswidth = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ else
+ buswidth = DMA_SLAVE_BUSWIDTH_1_BYTE;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.direction = dir;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ chan = espi->dma_rx;
+ buf = t->rx_buf;
+ sgt = &espi->rx_sgt;
+
+ conf.src_addr = espi->sspdr_phys;
+ conf.src_addr_width = buswidth;
+ } else {
+ chan = espi->dma_tx;
+ buf = t->tx_buf;
+ sgt = &espi->tx_sgt;
+
+ conf.dst_addr = espi->sspdr_phys;
+ conf.dst_addr_width = buswidth;
+ }
+
+ ret = dmaengine_slave_config(chan, &conf);
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * We need to split the transfer into PAGE_SIZE'd chunks. This is
+ * because we are using @espi->zeropage to provide a zero RX buffer
+ * for the TX transfers and we have only allocated one page for that.
+ *
+ * For performance reasons we allocate a new sg_table only when
+ * needed. Otherwise we will re-use the current one. Eventually the
+ * last sg_table is released in ep93xx_spi_release_dma().
+ */
+
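+	/*
+	 * Worked example (added; values assumed): a 10000-byte transfer
+	 * with 4 KiB pages needs nents = DIV_ROUND_UP(10000, 4096) = 3
+	 * scatterlist entries.
+	 */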
+ nents = DIV_ROUND_UP(len, PAGE_SIZE);
+ if (nents != sgt->nents) {
+ sg_free_table(sgt);
+
+ ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
+ if (ret)
+ return ERR_PTR(ret);
+ }
+
+ pbuf = buf;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = min_t(size_t, len, PAGE_SIZE);
+
+ if (buf) {
+ sg_set_page(sg, virt_to_page(pbuf), bytes,
+ offset_in_page(pbuf));
+ } else {
+ sg_set_page(sg, virt_to_page(espi->zeropage),
+ bytes, 0);
+ }
+
+ pbuf += bytes;
+ len -= bytes;
+ }
+
+ if (WARN_ON(len)) {
+ dev_warn(&espi->pdev->dev, "len = %zu expected 0!\n", len);
+ return ERR_PTR(-EINVAL);
+ }
+
+ nents = dma_map_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+ if (!nents)
+ return ERR_PTR(-ENOMEM);
+
+ txd = dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir, DMA_CTRL_ACK);
+ if (!txd) {
+ dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+ return ERR_PTR(-ENOMEM);
+ }
+ return txd;
+}
+
+/**
+ * ep93xx_spi_dma_finish() - finishes with a DMA transfer
+ * @espi: ep93xx SPI controller struct
+ * @dir: DMA transfer direction
+ *
+ * This function finishes the DMA transfer. After this, the DMA buffer is
+ * unmapped.
+ */
+static void ep93xx_spi_dma_finish(struct ep93xx_spi *espi,
+ enum dma_transfer_direction dir)
+{
+ struct dma_chan *chan;
+ struct sg_table *sgt;
+
+ if (dir == DMA_DEV_TO_MEM) {
+ chan = espi->dma_rx;
+ sgt = &espi->rx_sgt;
+ } else {
+ chan = espi->dma_tx;
+ sgt = &espi->tx_sgt;
+ }
+
+ dma_unmap_sg(chan->device->dev, sgt->sgl, sgt->nents, dir);
+}
+
+static void ep93xx_spi_dma_callback(void *callback_param)
+{
+ complete(callback_param);
+}
+
+static void ep93xx_spi_dma_transfer(struct ep93xx_spi *espi)
+{
+ struct spi_message *msg = espi->current_msg;
+ struct dma_async_tx_descriptor *rxd, *txd;
+
+ rxd = ep93xx_spi_dma_prepare(espi, DMA_DEV_TO_MEM);
+ if (IS_ERR(rxd)) {
+ dev_err(&espi->pdev->dev, "DMA RX failed: %ld\n", PTR_ERR(rxd));
+ msg->status = PTR_ERR(rxd);
+ return;
+ }
+
+ txd = ep93xx_spi_dma_prepare(espi, DMA_MEM_TO_DEV);
+ if (IS_ERR(txd)) {
+ ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
+		dev_err(&espi->pdev->dev, "DMA TX failed: %ld\n", PTR_ERR(txd));
+ msg->status = PTR_ERR(txd);
+ return;
+ }
+
+ /* We are ready when RX is done */
+ rxd->callback = ep93xx_spi_dma_callback;
+ rxd->callback_param = &espi->wait;
+
+ /* Now submit both descriptors and wait while they finish */
+ dmaengine_submit(rxd);
+ dmaengine_submit(txd);
+
+ dma_async_issue_pending(espi->dma_rx);
+ dma_async_issue_pending(espi->dma_tx);
+
+ wait_for_completion(&espi->wait);
+
+ ep93xx_spi_dma_finish(espi, DMA_MEM_TO_DEV);
+ ep93xx_spi_dma_finish(espi, DMA_DEV_TO_MEM);
+}
+
+/**
+ * ep93xx_spi_process_transfer() - processes one SPI transfer
+ * @espi: ep93xx SPI controller struct
+ * @msg: current message
+ * @t: transfer to process
+ *
+ * This function processes one SPI transfer given in @t. Function waits until
+ * transfer is complete (may sleep) and updates @msg->status based on whether
+ * transfer was successfully processed or not.
+ */
+static void ep93xx_spi_process_transfer(struct ep93xx_spi *espi,
+ struct spi_message *msg,
+ struct spi_transfer *t)
+{
+ struct ep93xx_spi_chip *chip = spi_get_ctldata(msg->spi);
+ int err;
+
+ msg->state = t;
+
+ err = ep93xx_spi_chip_setup(espi, chip, t->speed_hz, t->bits_per_word);
+ if (err) {
+ dev_err(&espi->pdev->dev,
+ "failed to setup chip for transfer\n");
+ msg->status = err;
+ return;
+ }
+
+ espi->rx = 0;
+ espi->tx = 0;
+
+	/*
+	 * There is no point in setting up DMA for transfers which fit into
+	 * the FIFO and can be completed with a single interrupt, so in
+	 * these cases we use PIO and don't bother with DMA.
+	 */
+ if (espi->dma_rx && t->len > SPI_FIFO_SIZE)
+ ep93xx_spi_dma_transfer(espi);
+ else
+ ep93xx_spi_pio_transfer(espi);
+
+	/*
+	 * In case of an error during transmit, we bail out of processing
+	 * the message.
+	 */
+ if (msg->status)
+ return;
+
+ msg->actual_length += t->len;
+
+ /*
+ * After this transfer is finished, perform any possible
+ * post-transfer actions requested by the protocol driver.
+ */
+ if (t->delay_usecs) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ schedule_timeout(usecs_to_jiffies(t->delay_usecs));
+ }
+ if (t->cs_change) {
+ if (!list_is_last(&t->transfer_list, &msg->transfers)) {
+			/*
+			 * In case the protocol driver is asking us to drop
+			 * the chipselect briefly, we let the scheduler handle
+			 * any "delay" here.
+			 */
+ ep93xx_spi_cs_control(msg->spi, false);
+ cond_resched();
+ ep93xx_spi_cs_control(msg->spi, true);
+ }
+ }
+}
+
+/*
+ * ep93xx_spi_process_message() - process one SPI message
+ * @espi: ep93xx SPI controller struct
+ * @msg: message to process
+ *
+ * This function processes a single SPI message. We go through all transfers in
+ * the message and pass them to ep93xx_spi_process_transfer(). Chipselect is
+ * asserted during the whole message (unless per transfer cs_change is set).
+ *
+ * @msg->status contains %0 in case of success or negative error code in case of
+ * failure.
+ */
+static void ep93xx_spi_process_message(struct ep93xx_spi *espi,
+ struct spi_message *msg)
+{
+ unsigned long timeout;
+ struct spi_transfer *t;
+ int err;
+
+ /*
+ * Enable the SPI controller and its clock.
+ */
+ err = ep93xx_spi_enable(espi);
+ if (err) {
+ dev_err(&espi->pdev->dev, "failed to enable SPI controller\n");
+ msg->status = err;
+ return;
+ }
+
+ /*
+	 * Just to be sure: flush any data from the RX FIFO.
+ */
+ timeout = jiffies + msecs_to_jiffies(SPI_TIMEOUT);
+ while (ep93xx_spi_read_u16(espi, SSPSR) & SSPSR_RNE) {
+ if (time_after(jiffies, timeout)) {
+ dev_warn(&espi->pdev->dev,
+ "timeout while flushing RX FIFO\n");
+ msg->status = -ETIMEDOUT;
+ return;
+ }
+ ep93xx_spi_read_u16(espi, SSPDR);
+ }
+
+	/*
+	 * We explicitly handle the FIFO level. This way we don't have to
+	 * check TX FIFO status using the %SSPSR_TNF bit, which may cause
+	 * RX FIFO overruns.
+	 */
+ espi->fifo_level = 0;
+
+ /*
+ * Assert the chipselect.
+ */
+ ep93xx_spi_cs_control(msg->spi, true);
+
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+ ep93xx_spi_process_transfer(espi, msg, t);
+ if (msg->status)
+ break;
+ }
+
+ /*
+ * Now the whole message is transferred (or failed for some reason). We
+ * deselect the device and disable the SPI controller.
+ */
+ ep93xx_spi_cs_control(msg->spi, false);
+ ep93xx_spi_disable(espi);
+}
+
+static int ep93xx_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+
+ msg->state = NULL;
+ msg->status = 0;
+ msg->actual_length = 0;
+
+ espi->current_msg = msg;
+ ep93xx_spi_process_message(espi, msg);
+ espi->current_msg = NULL;
+
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static irqreturn_t ep93xx_spi_interrupt(int irq, void *dev_id)
+{
+ struct ep93xx_spi *espi = dev_id;
+ u8 irq_status = ep93xx_spi_read_u8(espi, SSPIIR);
+
+	/*
+	 * If we got a ROR (receive overrun) interrupt, we know that
+	 * something is wrong. Just abort the message.
+	 */
+ if (unlikely(irq_status & SSPIIR_RORIS)) {
+ /* clear the overrun interrupt */
+ ep93xx_spi_write_u8(espi, SSPICR, 0);
+ dev_warn(&espi->pdev->dev,
+ "receive overrun, aborting the message\n");
+ espi->current_msg->status = -EIO;
+ } else {
+		/*
+		 * The interrupt is either RX (RIS) or TX (TIS). In both cases
+		 * we simply execute the next data transfer.
+		 */
+ if (ep93xx_spi_read_write(espi)) {
+			/*
+			 * In the normal case there is still some processing
+			 * left for the current transfer. Wait for the next
+			 * interrupt then.
+			 */
+ return IRQ_HANDLED;
+ }
+ }
+
+ /*
+ * Current transfer is finished, either with error or with success. In
+ * any case we disable interrupts and notify the worker to handle
+ * any post-processing of the message.
+ */
+ ep93xx_spi_disable_interrupts(espi);
+ complete(&espi->wait);
+ return IRQ_HANDLED;
+}
+
+static bool ep93xx_spi_dma_filter(struct dma_chan *chan, void *filter_param)
+{
+ if (ep93xx_dma_chan_is_m2p(chan))
+ return false;
+
+ chan->private = filter_param;
+ return true;
+}
+
+static int ep93xx_spi_setup_dma(struct ep93xx_spi *espi)
+{
+ dma_cap_mask_t mask;
+ int ret;
+
+ espi->zeropage = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!espi->zeropage)
+ return -ENOMEM;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ espi->dma_rx_data.port = EP93XX_DMA_SSP;
+ espi->dma_rx_data.direction = DMA_DEV_TO_MEM;
+ espi->dma_rx_data.name = "ep93xx-spi-rx";
+
+ espi->dma_rx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+ &espi->dma_rx_data);
+ if (!espi->dma_rx) {
+ ret = -ENODEV;
+ goto fail_free_page;
+ }
+
+ espi->dma_tx_data.port = EP93XX_DMA_SSP;
+ espi->dma_tx_data.direction = DMA_MEM_TO_DEV;
+ espi->dma_tx_data.name = "ep93xx-spi-tx";
+
+ espi->dma_tx = dma_request_channel(mask, ep93xx_spi_dma_filter,
+ &espi->dma_tx_data);
+ if (!espi->dma_tx) {
+ ret = -ENODEV;
+ goto fail_release_rx;
+ }
+
+ return 0;
+
+fail_release_rx:
+ dma_release_channel(espi->dma_rx);
+ espi->dma_rx = NULL;
+fail_free_page:
+ free_page((unsigned long)espi->zeropage);
+
+ return ret;
+}
+
+static void ep93xx_spi_release_dma(struct ep93xx_spi *espi)
+{
+ if (espi->dma_rx) {
+ dma_release_channel(espi->dma_rx);
+ sg_free_table(&espi->rx_sgt);
+ }
+ if (espi->dma_tx) {
+ dma_release_channel(espi->dma_tx);
+ sg_free_table(&espi->tx_sgt);
+ }
+
+ if (espi->zeropage)
+ free_page((unsigned long)espi->zeropage);
+}
+
+static int ep93xx_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct ep93xx_spi_info *info;
+ struct ep93xx_spi *espi;
+ struct resource *res;
+ int irq;
+ int error;
+
+ info = dev_get_platdata(&pdev->dev);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "failed to get irq resources\n");
+ return -EBUSY;
+ }
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "unable to get iomem resource\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*espi));
+ if (!master)
+ return -ENOMEM;
+
+ master->setup = ep93xx_spi_setup;
+ master->transfer_one_message = ep93xx_spi_transfer_one_message;
+ master->cleanup = ep93xx_spi_cleanup;
+ master->bus_num = pdev->id;
+ master->num_chipselect = info->num_chipselect;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+
+ platform_set_drvdata(pdev, master);
+
+ espi = spi_master_get_devdata(master);
+
+ espi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(espi->clk)) {
+ dev_err(&pdev->dev, "unable to get spi clock\n");
+ error = PTR_ERR(espi->clk);
+ goto fail_release_master;
+ }
+
+ init_completion(&espi->wait);
+
+ /*
+ * Calculate maximum and minimum supported clock rates
+ * for the controller.
+ */
+ master->max_speed_hz = clk_get_rate(espi->clk) / 2;
+ master->min_speed_hz = clk_get_rate(espi->clk) / (254 * 256);
+ espi->pdev = pdev;
+
+ espi->sspdr_phys = res->start + SSPDR;
+
+ espi->regs_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(espi->regs_base)) {
+ error = PTR_ERR(espi->regs_base);
+ goto fail_release_master;
+ }
+
+ error = devm_request_irq(&pdev->dev, irq, ep93xx_spi_interrupt,
+ 0, "ep93xx-spi", espi);
+ if (error) {
+ dev_err(&pdev->dev, "failed to request irq\n");
+ goto fail_release_master;
+ }
+
+ if (info->use_dma && ep93xx_spi_setup_dma(espi))
+ dev_warn(&pdev->dev, "DMA setup failed. Falling back to PIO\n");
+
+ /* make sure that the hardware is disabled */
+ ep93xx_spi_write_u8(espi, SSPCR1, 0);
+
+ error = devm_spi_register_master(&pdev->dev, master);
+ if (error) {
+ dev_err(&pdev->dev, "failed to register SPI master\n");
+ goto fail_free_dma;
+ }
+
+ dev_info(&pdev->dev, "EP93xx SPI Controller at 0x%08lx irq %d\n",
+ (unsigned long)res->start, irq);
+
+ return 0;
+
+fail_free_dma:
+ ep93xx_spi_release_dma(espi);
+fail_release_master:
+ spi_master_put(master);
+
+ return error;
+}
+
+static int ep93xx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct ep93xx_spi *espi = spi_master_get_devdata(master);
+
+ ep93xx_spi_release_dma(espi);
+
+ return 0;
+}
+
+static struct platform_driver ep93xx_spi_driver = {
+ .driver = {
+ .name = "ep93xx-spi",
+ },
+ .probe = ep93xx_spi_probe,
+ .remove = ep93xx_spi_remove,
+};
+module_platform_driver(ep93xx_spi_driver);
+
+MODULE_DESCRIPTION("EP93xx SPI Controller driver");
+MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:ep93xx-spi");
diff --git a/drivers/spi/spi-falcon.c b/drivers/spi/spi-falcon.c
new file mode 100644
index 000000000..286b2c81f
--- /dev/null
+++ b/drivers/spi/spi-falcon.c
@@ -0,0 +1,439 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ *
+ * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
+ */
+
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_platform.h>
+
+#include <lantiq_soc.h>
+
+#define DRV_NAME "sflash-falcon"
+
+#define FALCON_SPI_XFER_BEGIN (1 << 0)
+#define FALCON_SPI_XFER_END (1 << 1)
+
+/* Bus Read Configuration Register0 */
+#define BUSRCON0 0x00000010
+/* Bus Write Configuration Register0 */
+#define BUSWCON0 0x00000018
+/* Serial Flash Configuration Register */
+#define SFCON 0x00000080
+/* Serial Flash Time Register */
+#define SFTIME 0x00000084
+/* Serial Flash Status Register */
+#define SFSTAT 0x00000088
+/* Serial Flash Command Register */
+#define SFCMD 0x0000008C
+/* Serial Flash Address Register */
+#define SFADDR 0x00000090
+/* Serial Flash Data Register */
+#define SFDATA 0x00000094
+/* Serial Flash I/O Control Register */
+#define SFIO 0x00000098
+/* EBU Clock Control Register */
+#define EBUCC 0x000000C4
+
+/* Dummy Phase Length */
+#define SFCMD_DUMLEN_OFFSET 16
+#define SFCMD_DUMLEN_MASK 0x000F0000
+/* Chip Select */
+#define SFCMD_CS_OFFSET 24
+#define SFCMD_CS_MASK 0x07000000
+/* field offset */
+#define SFCMD_ALEN_OFFSET 20
+#define SFCMD_ALEN_MASK 0x00700000
+/* SCK Rise-edge Position */
+#define SFTIME_SCKR_POS_OFFSET 8
+#define SFTIME_SCKR_POS_MASK 0x00000F00
+/* SCK Period */
+#define SFTIME_SCK_PER_OFFSET 0
+#define SFTIME_SCK_PER_MASK 0x0000000F
+/* SCK Fall-edge Position */
+#define SFTIME_SCKF_POS_OFFSET 12
+#define SFTIME_SCKF_POS_MASK 0x0000F000
+/* Device Size */
+#define SFCON_DEV_SIZE_A23_0 0x03000000
+#define SFCON_DEV_SIZE_MASK 0x0F000000
+/* Read Data Position */
+#define SFTIME_RD_POS_MASK 0x000F0000
+/* Data Output */
+#define SFIO_UNUSED_WD_MASK 0x0000000F
+/* Command Opcode mask */
+#define SFCMD_OPC_MASK 0x000000FF
+/* dlen bytes of data to write */
+#define SFCMD_DIR_WRITE 0x00000100
+/* Data Length offset */
+#define SFCMD_DLEN_OFFSET 9
+/* Command Error */
+#define SFSTAT_CMD_ERR 0x20000000
+/* Access Command Pending */
+#define SFSTAT_CMD_PEND 0x00400000
+/* Frequency set to 100MHz. */
+#define EBUCC_EBUDIV_SELF100 0x00000001
+/* Serial Flash */
+#define BUSRCON0_AGEN_SERIAL_FLASH 0xF0000000
+/* 8-bit multiplexed */
+#define BUSRCON0_PORTW_8_BIT_MUX 0x00000000
+/* Serial Flash */
+#define BUSWCON0_AGEN_SERIAL_FLASH 0xF0000000
+/* Chip Select after opcode */
+#define SFCMD_KEEP_CS_KEEP_SELECTED 0x00008000
+
+#define CLOCK_100M 100000000
+#define CLOCK_50M 50000000
+
+struct falcon_sflash {
+ u32 sfcmd; /* for caching of opcode, direction, ... */
+ struct spi_master *master;
+};
+
+int falcon_sflash_xfer(struct spi_device *spi, struct spi_transfer *t,
+ unsigned long flags)
+{
+ struct device *dev = &spi->dev;
+ struct falcon_sflash *priv = spi_master_get_devdata(spi->master);
+ const u8 *txp = t->tx_buf;
+ u8 *rxp = t->rx_buf;
+ unsigned int bytelen = ((8 * t->len + 7) / 8);
+ unsigned int len, alen, dumlen;
+ u32 val;
+ enum {
+ state_init,
+ state_command_prepare,
+ state_write,
+ state_read,
+ state_disable_cs,
+ state_end
+ } state = state_init;
+
+ do {
+ switch (state) {
+ case state_init: /* detect phase of upper layer sequence */
+ {
+ /* initial write ? */
+ if (flags & FALCON_SPI_XFER_BEGIN) {
+ if (!txp) {
+ dev_err(dev,
+ "BEGIN without tx data!\n");
+ return -ENODATA;
+ }
+				/*
+				 * Prepare the parts of the sfcmd register
+				 * which should not change during a sequence!
+				 * The only exceptions are the length fields,
+				 * especially alen and dumlen.
+				 */
+
+ priv->sfcmd = ((spi->chip_select
+ << SFCMD_CS_OFFSET)
+ & SFCMD_CS_MASK);
+ priv->sfcmd |= SFCMD_KEEP_CS_KEEP_SELECTED;
+ priv->sfcmd |= *txp;
+ txp++;
+ bytelen--;
+ if (bytelen) {
+ /*
+ * more data:
+ * maybe address and/or dummy
+ */
+ state = state_command_prepare;
+ break;
+ } else {
+ dev_dbg(dev, "write cmd %02X\n",
+ priv->sfcmd & SFCMD_OPC_MASK);
+ }
+ }
+ /* continued write ? */
+ if (txp && bytelen) {
+ state = state_write;
+ break;
+ }
+ /* read data? */
+ if (rxp && bytelen) {
+ state = state_read;
+ break;
+ }
+ /* end of sequence? */
+ if (flags & FALCON_SPI_XFER_END)
+ state = state_disable_cs;
+ else
+ state = state_end;
+ break;
+ }
+ /* collect tx data for address and dummy phase */
+ case state_command_prepare:
+ {
+ /* txp is valid, already checked */
+ val = 0;
+ alen = 0;
+ dumlen = 0;
+ while (bytelen > 0) {
+ if (alen < 3) {
+ val = (val << 8) | (*txp++);
+ alen++;
+ } else if ((dumlen < 15) && (*txp == 0)) {
+					/*
+					 * assume dummy bytes are set to 0
+					 * by the upper layer
+					 */
+ dumlen++;
+ txp++;
+ } else {
+ break;
+ }
+ bytelen--;
+ }
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK | SFCMD_DUMLEN_MASK);
+ priv->sfcmd |= (alen << SFCMD_ALEN_OFFSET) |
+ (dumlen << SFCMD_DUMLEN_OFFSET);
+ if (alen > 0)
+ ltq_ebu_w32(val, SFADDR);
+
+ dev_dbg(dev, "wr %02X, alen=%d (addr=%06X) dlen=%d\n",
+ priv->sfcmd & SFCMD_OPC_MASK,
+ alen, val, dumlen);
+
+ if (bytelen > 0) {
+ /* continue with write */
+ state = state_write;
+ } else if (flags & FALCON_SPI_XFER_END) {
+ /* end of sequence? */
+ state = state_disable_cs;
+ } else {
+ /*
+ * go to end and expect another
+ * call (read or write)
+ */
+ state = state_end;
+ }
+ break;
+ }
+ case state_write:
+ {
+ /* txp still valid */
+ priv->sfcmd |= SFCMD_DIR_WRITE;
+ len = 0;
+ val = 0;
+ do {
+ if (bytelen--)
+ val |= (*txp++) << (8 * len++);
+ if ((flags & FALCON_SPI_XFER_END)
+ && (bytelen == 0)) {
+ priv->sfcmd &=
+ ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ }
+ if ((len == 4) || (bytelen == 0)) {
+ ltq_ebu_w32(val, SFDATA);
+ ltq_ebu_w32(priv->sfcmd
+ | (len<<SFCMD_DLEN_OFFSET),
+ SFCMD);
+ len = 0;
+ val = 0;
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK
+ | SFCMD_DUMLEN_MASK);
+ }
+ } while (bytelen);
+ state = state_end;
+ break;
+ }
+ case state_read:
+ {
+ /* read data */
+ priv->sfcmd &= ~SFCMD_DIR_WRITE;
+ do {
+ if ((flags & FALCON_SPI_XFER_END)
+ && (bytelen <= 4)) {
+ priv->sfcmd &=
+ ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ }
+ len = (bytelen > 4) ? 4 : bytelen;
+ bytelen -= len;
+ ltq_ebu_w32(priv->sfcmd
+ | (len << SFCMD_DLEN_OFFSET), SFCMD);
+ priv->sfcmd &= ~(SFCMD_ALEN_MASK
+ | SFCMD_DUMLEN_MASK);
+ do {
+ val = ltq_ebu_r32(SFSTAT);
+ if (val & SFSTAT_CMD_ERR) {
+ /* reset error status */
+ dev_err(dev, "SFSTAT: CMD_ERR");
+ dev_err(dev, " (%x)\n", val);
+ ltq_ebu_w32(SFSTAT_CMD_ERR,
+ SFSTAT);
+ return -EBADE;
+ }
+ } while (val & SFSTAT_CMD_PEND);
+ val = ltq_ebu_r32(SFDATA);
+ do {
+ *rxp = (val & 0xFF);
+ rxp++;
+ val >>= 8;
+ len--;
+ } while (len);
+ } while (bytelen);
+ state = state_end;
+ break;
+ }
+ case state_disable_cs:
+ {
+ priv->sfcmd &= ~SFCMD_KEEP_CS_KEEP_SELECTED;
+ ltq_ebu_w32(priv->sfcmd | (0 << SFCMD_DLEN_OFFSET),
+ SFCMD);
+ val = ltq_ebu_r32(SFSTAT);
+ if (val & SFSTAT_CMD_ERR) {
+ /* reset error status */
+ dev_err(dev, "SFSTAT: CMD_ERR (%x)\n", val);
+ ltq_ebu_w32(SFSTAT_CMD_ERR, SFSTAT);
+ return -EBADE;
+ }
+ state = state_end;
+ break;
+ }
+ case state_end:
+ break;
+ }
+ } while (state != state_end);
+
+ return 0;
+}
+
+static int falcon_sflash_setup(struct spi_device *spi)
+{
+ unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+
+ if (spi->max_speed_hz >= CLOCK_100M) {
+ /* set EBU clock to 100 MHz */
+ ltq_sys1_w32_mask(0, EBUCC_EBUDIV_SELF100, EBUCC);
+ i = 1; /* divider */
+ } else {
+ /* set EBU clock to 50 MHz */
+ ltq_sys1_w32_mask(EBUCC_EBUDIV_SELF100, 0, EBUCC);
+
+ /* search for suitable divider */
+ for (i = 1; i < 7; i++) {
+ if (CLOCK_50M / i <= spi->max_speed_hz)
+ break;
+ }
+ }
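+	/*
+	 * Illustrative example (added; values assumed): for
+	 * spi->max_speed_hz = 20 MHz, the loop above picks i = 3, since
+	 * 50 MHz / 3 ~= 16.7 MHz is the first rate not above the limit.
+	 */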
+
+ /* setup period of serial clock */
+ ltq_ebu_w32_mask(SFTIME_SCKF_POS_MASK
+ | SFTIME_SCKR_POS_MASK
+ | SFTIME_SCK_PER_MASK,
+ (i << SFTIME_SCKR_POS_OFFSET)
+ | (i << (SFTIME_SCK_PER_OFFSET + 1)),
+ SFTIME);
+
+	/*
+	 * Set some bits of unused_wd so as not to trigger HOLD/WP
+	 * signals on non-QUAD flashes.
+	 */
+ ltq_ebu_w32((SFIO_UNUSED_WD_MASK & (0x8 | 0x4)), SFIO);
+
+ ltq_ebu_w32(BUSRCON0_AGEN_SERIAL_FLASH | BUSRCON0_PORTW_8_BIT_MUX,
+ BUSRCON0);
+ ltq_ebu_w32(BUSWCON0_AGEN_SERIAL_FLASH, BUSWCON0);
+ /* set address wrap around to maximum for 24-bit addresses */
+ ltq_ebu_w32_mask(SFCON_DEV_SIZE_MASK, SFCON_DEV_SIZE_A23_0, SFCON);
+
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ return 0;
+}
+
+static int falcon_sflash_xfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct falcon_sflash *priv = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ unsigned long spi_flags;
+ unsigned long flags;
+ int ret = 0;
+
+ priv->sfcmd = 0;
+ m->actual_length = 0;
+
+ spi_flags = FALCON_SPI_XFER_BEGIN;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (list_is_last(&t->transfer_list, &m->transfers))
+ spi_flags |= FALCON_SPI_XFER_END;
+
+ spin_lock_irqsave(&ebu_lock, flags);
+ ret = falcon_sflash_xfer(m->spi, t, spi_flags);
+ spin_unlock_irqrestore(&ebu_lock, flags);
+
+ if (ret)
+ break;
+
+ m->actual_length += t->len;
+
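+		/* neither per-transfer delays nor cs_change are supported here */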
+ WARN_ON(t->delay_usecs || t->cs_change);
+ spi_flags = 0;
+ }
+
+ m->status = ret;
+ spi_finalize_current_message(master);
+
+ return 0;
+}
+
+static int falcon_sflash_probe(struct platform_device *pdev)
+{
+ struct falcon_sflash *priv;
+ struct spi_master *master;
+ int ret;
+
+ if (ltq_boot_select() != BS_SPI) {
+ dev_err(&pdev->dev, "invalid bootstrap options\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*priv));
+ if (!master)
+ return -ENOMEM;
+
+ priv = spi_master_get_devdata(master);
+ priv->master = master;
+
+ master->mode_bits = SPI_MODE_3;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->setup = falcon_sflash_setup;
+ master->transfer_one_message = falcon_sflash_xfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret)
+ spi_master_put(master);
+ return ret;
+}
+
+static const struct of_device_id falcon_sflash_match[] = {
+ { .compatible = "lantiq,sflash-falcon" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, falcon_sflash_match);
+
+static struct platform_driver falcon_sflash_driver = {
+ .probe = falcon_sflash_probe,
+ .driver = {
+ .name = DRV_NAME,
+ .of_match_table = falcon_sflash_match,
+ }
+};
+
+module_platform_driver(falcon_sflash_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Lantiq Falcon SPI/SFLASH controller driver");
diff --git a/drivers/spi/spi-fsl-cpm.c b/drivers/spi/spi-fsl-cpm.c
new file mode 100644
index 000000000..896add8cf
--- /dev/null
+++ b/drivers/spi/spi-fsl-cpm.c
@@ -0,0 +1,406 @@
+/*
+ * Freescale SPI controller driver cpm functions.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <asm/cpm.h>
+#include <asm/qe.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_address.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+#include <linux/platform_device.h>
+
+#include "spi-fsl-cpm.h"
+#include "spi-fsl-lib.h"
+#include "spi-fsl-spi.h"
+
+/* CPM1 and CPM2 are mutually exclusive. */
+#ifdef CONFIG_CPM1
+#include <asm/cpm1.h>
+#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_CH_SPI, 0)
+#else
+#include <asm/cpm2.h>
+#define CPM_SPI_CMD mk_cr_cmd(CPM_CR_SPI_PAGE, CPM_CR_SPI_SBLOCK, 0, 0)
+#endif
+
+#define SPIE_TXB 0x00000200 /* Last char is written to tx fifo */
+#define SPIE_RXB 0x00000100 /* Last char is written to rx buf */
+
+/* SPCOM register values */
+#define SPCOM_STR (1 << 23) /* Start transmit */
+
+#define SPI_PRAM_SIZE 0x100
+#define SPI_MRBLR ((unsigned int)PAGE_SIZE)
+
+static void *fsl_dummy_rx;
+static DEFINE_MUTEX(fsl_dummy_rx_lock);
+static int fsl_dummy_rx_refcnt;
+
+void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi)
+{
+ if (mspi->flags & SPI_QE) {
+ qe_issue_cmd(QE_INIT_TX_RX, mspi->subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, 0);
+ } else {
+ if (mspi->flags & SPI_CPM1) {
+ out_be32(&mspi->pram->rstate, 0);
+ out_be16(&mspi->pram->rbptr,
+ in_be16(&mspi->pram->rbase));
+ out_be32(&mspi->pram->tstate, 0);
+ out_be16(&mspi->pram->tbptr,
+ in_be16(&mspi->pram->tbase));
+ } else {
+ cpm_command(CPM_SPI_CMD, CPM_CR_INIT_TRX);
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_reinit_txrx);
+
+static void fsl_spi_cpm_bufs_start(struct mpc8xxx_spi *mspi)
+{
+ struct cpm_buf_desc __iomem *tx_bd = mspi->tx_bd;
+ struct cpm_buf_desc __iomem *rx_bd = mspi->rx_bd;
+ unsigned int xfer_len = min(mspi->count, SPI_MRBLR);
+ unsigned int xfer_ofs;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ xfer_ofs = mspi->xfer_in_progress->len - mspi->count;
+
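+	/*
+	 * The dummy buffers are reused as-is for every chunk, so unlike the
+	 * real buffers they must not be advanced by the transfer offset.
+	 */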
+ if (mspi->rx_dma == mspi->dma_dummy_rx)
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma);
+ else
+ out_be32(&rx_bd->cbd_bufaddr, mspi->rx_dma + xfer_ofs);
+ out_be16(&rx_bd->cbd_datlen, 0);
+ out_be16(&rx_bd->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT | BD_SC_WRAP);
+
+ if (mspi->tx_dma == mspi->dma_dummy_tx)
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma);
+ else
+ out_be32(&tx_bd->cbd_bufaddr, mspi->tx_dma + xfer_ofs);
+ out_be16(&tx_bd->cbd_datlen, xfer_len);
+ out_be16(&tx_bd->cbd_sc, BD_SC_READY | BD_SC_INTRPT | BD_SC_WRAP |
+ BD_SC_LAST);
+
+ /* start transfer */
+ mpc8xxx_spi_write_reg(&reg_base->command, SPCOM_STR);
+}
+
+int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, bool is_dma_mapped)
+{
+ struct device *dev = mspi->dev;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ if (is_dma_mapped) {
+ mspi->map_tx_dma = 0;
+ mspi->map_rx_dma = 0;
+ } else {
+ mspi->map_tx_dma = 1;
+ mspi->map_rx_dma = 1;
+ }
+
+ if (!t->tx_buf) {
+ mspi->tx_dma = mspi->dma_dummy_tx;
+ mspi->map_tx_dma = 0;
+ }
+
+ if (!t->rx_buf) {
+ mspi->rx_dma = mspi->dma_dummy_rx;
+ mspi->map_rx_dma = 0;
+ }
+
+ if (mspi->map_tx_dma) {
+		void *nonconst_tx = (void *)mspi->tx; /* cast away const; DMA only reads it */
+
+ mspi->tx_dma = dma_map_single(dev, nonconst_tx, t->len,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mspi->tx_dma)) {
+ dev_err(dev, "unable to map tx dma\n");
+ return -ENOMEM;
+ }
+ } else if (t->tx_buf) {
+ mspi->tx_dma = t->tx_dma;
+ }
+
+ if (mspi->map_rx_dma) {
+ mspi->rx_dma = dma_map_single(dev, mspi->rx, t->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mspi->rx_dma)) {
+ dev_err(dev, "unable to map rx dma\n");
+ goto err_rx_dma;
+ }
+ } else if (t->rx_buf) {
+ mspi->rx_dma = t->rx_dma;
+ }
+
+ /* enable rx ints */
+ mpc8xxx_spi_write_reg(&reg_base->mask, SPIE_RXB);
+
+ mspi->xfer_in_progress = t;
+ mspi->count = t->len;
+
+ /* start CPM transfers */
+ fsl_spi_cpm_bufs_start(mspi);
+
+ return 0;
+
+err_rx_dma:
+ if (mspi->map_tx_dma)
+ dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs);
+
+void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct spi_transfer *t = mspi->xfer_in_progress;
+
+ if (mspi->map_tx_dma)
+ dma_unmap_single(dev, mspi->tx_dma, t->len, DMA_TO_DEVICE);
+ if (mspi->map_rx_dma)
+ dma_unmap_single(dev, mspi->rx_dma, t->len, DMA_FROM_DEVICE);
+ mspi->xfer_in_progress = NULL;
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_bufs_complete);
+
+void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events)
+{
+ u16 len;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ dev_dbg(mspi->dev, "%s: bd datlen %d, count %d\n", __func__,
+ in_be16(&mspi->rx_bd->cbd_datlen), mspi->count);
+
+ len = in_be16(&mspi->rx_bd->cbd_datlen);
+ if (len > mspi->count) {
+ WARN_ON(1);
+ len = mspi->count;
+ }
+
+ /* Clear the events */
+ mpc8xxx_spi_write_reg(&reg_base->event, events);
+
+ mspi->count -= len;
+ if (mspi->count)
+ fsl_spi_cpm_bufs_start(mspi);
+ else
+ complete(&mspi->done);
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_irq);
+
+static void *fsl_spi_alloc_dummy_rx(void)
+{
+ mutex_lock(&fsl_dummy_rx_lock);
+
+ if (!fsl_dummy_rx)
+ fsl_dummy_rx = kmalloc(SPI_MRBLR, GFP_KERNEL);
+ if (fsl_dummy_rx)
+ fsl_dummy_rx_refcnt++;
+
+ mutex_unlock(&fsl_dummy_rx_lock);
+
+ return fsl_dummy_rx;
+}
+
+static void fsl_spi_free_dummy_rx(void)
+{
+ mutex_lock(&fsl_dummy_rx_lock);
+
+ switch (fsl_dummy_rx_refcnt) {
+ case 0:
+ WARN_ON(1);
+ break;
+ case 1:
+ kfree(fsl_dummy_rx);
+ fsl_dummy_rx = NULL;
+ /* fall through */
+ default:
+ fsl_dummy_rx_refcnt--;
+ break;
+ }
+
+ mutex_unlock(&fsl_dummy_rx_lock);
+}
+
+static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct device_node *np = dev->of_node;
+ const u32 *iprop;
+ int size;
+ void __iomem *spi_base;
+ unsigned long pram_ofs = -ENOMEM;
+
+ /* Can't use of_address_to_resource(), QE muram isn't at 0. */
+ iprop = of_get_property(np, "reg", &size);
+
+ /* QE with a fixed pram location? */
+ if (mspi->flags & SPI_QE && iprop && size == sizeof(*iprop) * 4)
+ return cpm_muram_alloc_fixed(iprop[2], SPI_PRAM_SIZE);
+
+ /* QE but with a dynamic pram location? */
+ if (mspi->flags & SPI_QE) {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, mspi->subblock,
+ QE_CR_PROTOCOL_UNSPECIFIED, pram_ofs);
+ return pram_ofs;
+ }
+
+ spi_base = of_iomap(np, 1);
+ if (spi_base == NULL)
+ return -EINVAL;
+
+ if (mspi->flags & SPI_CPM2) {
+ pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
+ out_be16(spi_base, pram_ofs);
+ }
+
+ iounmap(spi_base);
+ return pram_ofs;
+}
+
+int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+ struct device_node *np = dev->of_node;
+ const u32 *iprop;
+ int size;
+ unsigned long bds_ofs;
+
+ if (!(mspi->flags & SPI_CPM_MODE))
+ return 0;
+
+ if (!fsl_spi_alloc_dummy_rx())
+ return -ENOMEM;
+
+ if (mspi->flags & SPI_QE) {
+ iprop = of_get_property(np, "cell-index", &size);
+ if (iprop && size == sizeof(*iprop))
+ mspi->subblock = *iprop;
+
+ switch (mspi->subblock) {
+ default:
+ dev_warn(dev, "cell-index unspecified, assuming SPI1\n");
+ /* fall through */
+ case 0:
+ mspi->subblock = QE_CR_SUBBLOCK_SPI1;
+ break;
+ case 1:
+ mspi->subblock = QE_CR_SUBBLOCK_SPI2;
+ break;
+ }
+ }
+
+ if (mspi->flags & SPI_CPM1) {
+ struct resource *res;
+ void *pram;
+
+ res = platform_get_resource(to_platform_device(dev),
+ IORESOURCE_MEM, 1);
+ pram = devm_ioremap_resource(dev, res);
+ if (IS_ERR(pram))
+ mspi->pram = NULL;
+ else
+ mspi->pram = pram;
+ } else {
+ unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);
+
+ if (IS_ERR_VALUE(pram_ofs))
+ mspi->pram = NULL;
+ else
+ mspi->pram = cpm_muram_addr(pram_ofs);
+ }
+ if (mspi->pram == NULL) {
+ dev_err(dev, "can't allocate spi parameter ram\n");
+ goto err_pram;
+ }
+
+ bds_ofs = cpm_muram_alloc(sizeof(*mspi->tx_bd) +
+ sizeof(*mspi->rx_bd), 8);
+ if (IS_ERR_VALUE(bds_ofs)) {
+ dev_err(dev, "can't allocate bds\n");
+ goto err_bds;
+ }
+
+ mspi->dma_dummy_tx = dma_map_single(dev, empty_zero_page, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, mspi->dma_dummy_tx)) {
+ dev_err(dev, "unable to map dummy tx buffer\n");
+ goto err_dummy_tx;
+ }
+
+ mspi->dma_dummy_rx = dma_map_single(dev, fsl_dummy_rx, SPI_MRBLR,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, mspi->dma_dummy_rx)) {
+ dev_err(dev, "unable to map dummy rx buffer\n");
+ goto err_dummy_rx;
+ }
+
+ mspi->tx_bd = cpm_muram_addr(bds_ofs);
+ mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
+
+ /* Initialize parameter ram. */
+ out_be16(&mspi->pram->tbase, cpm_muram_offset(mspi->tx_bd));
+ out_be16(&mspi->pram->rbase, cpm_muram_offset(mspi->rx_bd));
+ out_8(&mspi->pram->tfcr, CPMFCR_EB | CPMFCR_GBL);
+ out_8(&mspi->pram->rfcr, CPMFCR_EB | CPMFCR_GBL);
+ out_be16(&mspi->pram->mrblr, SPI_MRBLR);
+ out_be32(&mspi->pram->rstate, 0);
+ out_be32(&mspi->pram->rdp, 0);
+ out_be16(&mspi->pram->rbptr, 0);
+ out_be16(&mspi->pram->rbc, 0);
+ out_be32(&mspi->pram->rxtmp, 0);
+ out_be32(&mspi->pram->tstate, 0);
+ out_be32(&mspi->pram->tdp, 0);
+ out_be16(&mspi->pram->tbptr, 0);
+ out_be16(&mspi->pram->tbc, 0);
+ out_be32(&mspi->pram->txtmp, 0);
+
+ return 0;
+
+err_dummy_rx:
+ dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
+err_dummy_tx:
+ cpm_muram_free(bds_ofs);
+err_bds:
+ if (!(mspi->flags & SPI_CPM1))
+ cpm_muram_free(cpm_muram_offset(mspi->pram));
+err_pram:
+ fsl_spi_free_dummy_rx();
+ return -ENOMEM;
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_init);
+
+void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi)
+{
+ struct device *dev = mspi->dev;
+
+ if (!(mspi->flags & SPI_CPM_MODE))
+ return;
+
+ dma_unmap_single(dev, mspi->dma_dummy_rx, SPI_MRBLR, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, mspi->dma_dummy_tx, PAGE_SIZE, DMA_TO_DEVICE);
+ cpm_muram_free(cpm_muram_offset(mspi->tx_bd));
+ cpm_muram_free(cpm_muram_offset(mspi->pram));
+ fsl_spi_free_dummy_rx();
+}
+EXPORT_SYMBOL_GPL(fsl_spi_cpm_free);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-cpm.h b/drivers/spi/spi-fsl-cpm.h
new file mode 100644
index 000000000..c71115805
--- /dev/null
+++ b/drivers/spi/spi-fsl-cpm.h
@@ -0,0 +1,43 @@
+/*
+ * Freescale SPI controller driver cpm functions.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __SPI_FSL_CPM_H__
+#define __SPI_FSL_CPM_H__
+
+#include "spi-fsl-lib.h"
+
+#ifdef CONFIG_FSL_SOC
+extern void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi);
+extern int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, bool is_dma_mapped);
+extern void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi);
+extern void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events);
+extern int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi);
+extern void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi);
+#else
+static inline void fsl_spi_cpm_reinit_txrx(struct mpc8xxx_spi *mspi) { }
+static inline int fsl_spi_cpm_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t,
+ bool is_dma_mapped) { return 0; }
+static inline void fsl_spi_cpm_bufs_complete(struct mpc8xxx_spi *mspi) { }
+static inline void fsl_spi_cpm_irq(struct mpc8xxx_spi *mspi, u32 events) { }
+static inline int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi) { return 0; }
+static inline void fsl_spi_cpm_free(struct mpc8xxx_spi *mspi) { }
+#endif
+
+#endif /* __SPI_FSL_CPM_H__ */
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
new file mode 100644
index 000000000..5fe54cda3
--- /dev/null
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -0,0 +1,643 @@
+/*
+ * drivers/spi/spi-fsl-dspi.c
+ *
+ * Copyright 2013 Freescale Semiconductor, Inc.
+ *
+ * Freescale DSPI driver
+ * This file contains a driver for the Freescale DSPI
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/math64.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/sched.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/time.h>
+
+#define DRIVER_NAME "fsl-dspi"
+
+#define TRAN_STATE_RX_VOID 0x01
+#define TRAN_STATE_TX_VOID 0x02
+#define TRAN_STATE_WORD_ODD_NUM 0x04
+
+#define DSPI_FIFO_SIZE 4
+
+#define SPI_MCR 0x00
+#define SPI_MCR_MASTER (1 << 31)
+#define SPI_MCR_PCSIS (0x3F << 16)
+#define SPI_MCR_CLR_TXF (1 << 11)
+#define SPI_MCR_CLR_RXF (1 << 10)
+
+#define SPI_TCR 0x08
+
+#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
+#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
+#define SPI_CTAR_CPOL(x) ((x) << 26)
+#define SPI_CTAR_CPHA(x) ((x) << 25)
+#define SPI_CTAR_LSBFE(x) ((x) << 24)
+#define SPI_CTAR_PCSSCK(x) (((x) & 0x00000003) << 22)
+#define SPI_CTAR_PASC(x) (((x) & 0x00000003) << 20)
+#define SPI_CTAR_PDT(x) (((x) & 0x00000003) << 18)
+#define SPI_CTAR_PBR(x) (((x) & 0x00000003) << 16)
+#define SPI_CTAR_CSSCK(x) (((x) & 0x0000000f) << 12)
+#define SPI_CTAR_ASC(x) (((x) & 0x0000000f) << 8)
+#define SPI_CTAR_DT(x) (((x) & 0x0000000f) << 4)
+#define SPI_CTAR_BR(x) ((x) & 0x0000000f)
+#define SPI_CTAR_SCALE_BITS 0xf
+
+#define SPI_CTAR0_SLAVE 0x0c
+
+#define SPI_SR 0x2c
+#define SPI_SR_EOQF 0x10000000
+
+#define SPI_RSER 0x30
+#define SPI_RSER_EOQFE 0x10000000
+
+#define SPI_PUSHR 0x34
+#define SPI_PUSHR_CONT (1 << 31)
+#define SPI_PUSHR_CTAS(x) (((x) & 0x00000003) << 28)
+#define SPI_PUSHR_EOQ (1 << 27)
+#define SPI_PUSHR_CTCNT (1 << 26)
+#define SPI_PUSHR_PCS(x)	(((1 << (x)) & 0x0000003f) << 16)
+#define SPI_PUSHR_TXDATA(x) ((x) & 0x0000ffff)
+
+#define SPI_PUSHR_SLAVE 0x34
+
+#define SPI_POPR 0x38
+#define SPI_POPR_RXDATA(x) ((x) & 0x0000ffff)
+
+#define SPI_TXFR0 0x3c
+#define SPI_TXFR1 0x40
+#define SPI_TXFR2 0x44
+#define SPI_TXFR3 0x48
+#define SPI_RXFR0 0x7c
+#define SPI_RXFR1 0x80
+#define SPI_RXFR2 0x84
+#define SPI_RXFR3 0x88
+
+#define SPI_FRAME_BITS(bits) SPI_CTAR_FMSZ((bits) - 1)
+#define SPI_FRAME_BITS_MASK SPI_CTAR_FMSZ(0xf)
+#define SPI_FRAME_BITS_16 SPI_CTAR_FMSZ(0xf)
+#define SPI_FRAME_BITS_8 SPI_CTAR_FMSZ(0x7)
+
+#define SPI_CS_INIT 0x01
+#define SPI_CS_ASSERT 0x02
+#define SPI_CS_DROP 0x04
+
+struct chip_data {
+ u32 mcr_val;
+ u32 ctar_val;
+ u16 void_write_data;
+};
+
+struct fsl_dspi {
+ struct spi_master *master;
+ struct platform_device *pdev;
+
+ struct regmap *regmap;
+ int irq;
+ struct clk *clk;
+
+ struct spi_transfer *cur_transfer;
+ struct spi_message *cur_msg;
+ struct chip_data *cur_chip;
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ char dataflags;
+ u8 cs;
+ u16 void_write_data;
+ u32 cs_change;
+
+ wait_queue_head_t waitq;
+ u32 waitflags;
+};
+
+static inline int is_double_byte_mode(struct fsl_dspi *dspi)
+{
+ unsigned int val;
+
+ regmap_read(dspi->regmap, SPI_CTAR(dspi->cs), &val);
+
+ return ((val & SPI_FRAME_BITS_MASK) == SPI_FRAME_BITS(8)) ? 0 : 1;
+}
+
+static void hz_to_spi_baud(char *pbr, char *br, int speed_hz,
+ unsigned long clkrate)
+{
+ /* Valid baud rate pre-scaler values */
+ int pbr_tbl[4] = {2, 3, 5, 7};
+ int brs[16] = { 2, 4, 6, 8,
+ 16, 32, 64, 128,
+ 256, 512, 1024, 2048,
+ 4096, 8192, 16384, 32768 };
+ int scale_needed, scale, minscale = INT_MAX;
+ int i, j;
+
+ scale_needed = clkrate / speed_hz;
+ if (clkrate % speed_hz)
+ scale_needed++;
+
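+	/*
+	 * Walk every {baud scaler, prescaler} pair and keep the smallest
+	 * combined divider that still satisfies scale >= scale_needed,
+	 * i.e. the fastest SCK not exceeding the requested speed.
+	 */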
+ for (i = 0; i < ARRAY_SIZE(brs); i++)
+ for (j = 0; j < ARRAY_SIZE(pbr_tbl); j++) {
+ scale = brs[i] * pbr_tbl[j];
+ if (scale >= scale_needed) {
+ if (scale < minscale) {
+ minscale = scale;
+ *br = i;
+ *pbr = j;
+ }
+ break;
+ }
+ }
+
+ if (minscale == INT_MAX) {
+		pr_warn("Cannot find valid baud rate: speed_hz is %d, clkrate is %ld; using max prescaler value.\n",
+ speed_hz, clkrate);
+ *pbr = ARRAY_SIZE(pbr_tbl) - 1;
+ *br = ARRAY_SIZE(brs) - 1;
+ }
+}
+
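+/*
+ * An illustrative example of the search above: with clkrate = 66 MHz and
+ * speed_hz = 10 MHz, scale_needed = 7 and the loops settle on pbr = 2,
+ * br = 4 (combined divider 8), i.e. an actual SCK of 8.25 MHz.
+ */
+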
+static void ns_delay_scale(char *psc, char *sc, int delay_ns,
+ unsigned long clkrate)
+{
+ int pscale_tbl[4] = {1, 3, 5, 7};
+ int scale_needed, scale, minscale = INT_MAX;
+ int i, j;
+ u32 remainder;
+
+ scale_needed = div_u64_rem((u64)delay_ns * clkrate, NSEC_PER_SEC,
+ &remainder);
+ if (remainder)
+ scale_needed++;
+
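+	/*
+	 * Same search as in hz_to_spi_baud(): pick the smallest delay
+	 * prescaler/scaler pair whose combined scale, pscale * 2^(sc + 1),
+	 * covers the requested delay.
+	 */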
+ for (i = 0; i < ARRAY_SIZE(pscale_tbl); i++)
+ for (j = 0; j <= SPI_CTAR_SCALE_BITS; j++) {
+ scale = pscale_tbl[i] * (2 << j);
+ if (scale >= scale_needed) {
+ if (scale < minscale) {
+ minscale = scale;
+ *psc = i;
+ *sc = j;
+ }
+ break;
+ }
+ }
+
+ if (minscale == INT_MAX) {
+		pr_warn("Cannot find correct scale values for %dns delay at clkrate %ld; using max prescaler value\n",
+ delay_ns, clkrate);
+ *psc = ARRAY_SIZE(pscale_tbl) - 1;
+ *sc = SPI_CTAR_SCALE_BITS;
+ }
+}
+
+static int dspi_transfer_write(struct fsl_dspi *dspi)
+{
+ int tx_count = 0;
+ int tx_word;
+ u16 d16;
+ u8 d8;
+ u32 dspi_pushr = 0;
+ int first = 1;
+
+ tx_word = is_double_byte_mode(dspi);
+
+	/*
+	 * If we are in word mode but only have a single byte to transfer,
+	 * switch to byte mode temporarily; we switch back at the end of
+	 * the transfer.
+	 */
+ if (tx_word && (dspi->len == 1)) {
+ dspi->dataflags |= TRAN_STATE_WORD_ODD_NUM;
+ regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(8));
+ tx_word = 0;
+ }
+
+ while (dspi->len && (tx_count < DSPI_FIFO_SIZE)) {
+ if (tx_word) {
+ if (dspi->len == 1)
+ break;
+
+ if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) {
+ d16 = *(u16 *)dspi->tx;
+ dspi->tx += 2;
+ } else {
+ d16 = dspi->void_write_data;
+ }
+
+ dspi_pushr = SPI_PUSHR_TXDATA(d16) |
+ SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(dspi->cs) |
+ SPI_PUSHR_CONT;
+
+ dspi->len -= 2;
+ } else {
+ if (!(dspi->dataflags & TRAN_STATE_TX_VOID)) {
+ d8 = *(u8 *)dspi->tx;
+ dspi->tx++;
+ } else {
+ d8 = (u8)dspi->void_write_data;
+ }
+
+ dspi_pushr = SPI_PUSHR_TXDATA(d8) |
+ SPI_PUSHR_PCS(dspi->cs) |
+ SPI_PUSHR_CTAS(dspi->cs) |
+ SPI_PUSHR_CONT;
+
+ dspi->len--;
+ }
+
+ if (dspi->len == 0 || tx_count == DSPI_FIFO_SIZE - 1) {
+			/* last entry of this FIFO burst: mark end of queue */
+ dspi_pushr |= SPI_PUSHR_EOQ;
+ if ((dspi->cs_change) && (!dspi->len))
+ dspi_pushr &= ~SPI_PUSHR_CONT;
+ } else if (tx_word && (dspi->len == 1))
+ dspi_pushr |= SPI_PUSHR_EOQ;
+
+ if (first) {
+ first = 0;
+ dspi_pushr |= SPI_PUSHR_CTCNT; /* clear counter */
+ }
+
+ regmap_write(dspi->regmap, SPI_PUSHR, dspi_pushr);
+
+ tx_count++;
+ }
+
+ return tx_count * (tx_word + 1);
+}
+
+static int dspi_transfer_read(struct fsl_dspi *dspi)
+{
+ int rx_count = 0;
+ int rx_word = is_double_byte_mode(dspi);
+ u16 d;
+
+ while ((dspi->rx < dspi->rx_end)
+ && (rx_count < DSPI_FIFO_SIZE)) {
+ if (rx_word) {
+ unsigned int val;
+
+ if ((dspi->rx_end - dspi->rx) == 1)
+ break;
+
+ regmap_read(dspi->regmap, SPI_POPR, &val);
+ d = SPI_POPR_RXDATA(val);
+
+ if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
+ *(u16 *)dspi->rx = d;
+ dspi->rx += 2;
+
+ } else {
+ unsigned int val;
+
+ regmap_read(dspi->regmap, SPI_POPR, &val);
+ d = SPI_POPR_RXDATA(val);
+ if (!(dspi->dataflags & TRAN_STATE_RX_VOID))
+ *(u8 *)dspi->rx = d;
+ dspi->rx++;
+ }
+ rx_count++;
+ }
+
+ return rx_count;
+}
+
+static int dspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *message)
+{
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+ struct spi_device *spi = message->spi;
+ struct spi_transfer *transfer;
+ int status = 0;
+ message->actual_length = 0;
+
+ list_for_each_entry(transfer, &message->transfers, transfer_list) {
+ dspi->cur_transfer = transfer;
+ dspi->cur_msg = message;
+ dspi->cur_chip = spi_get_ctldata(spi);
+ dspi->cs = spi->chip_select;
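+		/* ensure CS is dropped at the end of the last transfer */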
+ if (dspi->cur_transfer->transfer_list.next
+ == &dspi->cur_msg->transfers)
+ transfer->cs_change = 1;
+ dspi->cs_change = transfer->cs_change;
+ dspi->void_write_data = dspi->cur_chip->void_write_data;
+
+ dspi->dataflags = 0;
+ dspi->tx = (void *)transfer->tx_buf;
+ dspi->tx_end = dspi->tx + transfer->len;
+ dspi->rx = transfer->rx_buf;
+ dspi->rx_end = dspi->rx + transfer->len;
+ dspi->len = transfer->len;
+
+ if (!dspi->rx)
+ dspi->dataflags |= TRAN_STATE_RX_VOID;
+
+ if (!dspi->tx)
+ dspi->dataflags |= TRAN_STATE_TX_VOID;
+
+ regmap_write(dspi->regmap, SPI_MCR, dspi->cur_chip->mcr_val);
+ regmap_update_bits(dspi->regmap, SPI_MCR,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF,
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF);
+ regmap_write(dspi->regmap, SPI_CTAR(dspi->cs),
+ dspi->cur_chip->ctar_val);
+
+ regmap_write(dspi->regmap, SPI_RSER, SPI_RSER_EOQFE);
+ message->actual_length += dspi_transfer_write(dspi);
+
+ if (wait_event_interruptible(dspi->waitq, dspi->waitflags))
+			dev_err(&dspi->pdev->dev, "waiting for transfer to complete failed\n");
+ dspi->waitflags = 0;
+
+ if (transfer->delay_usecs)
+ udelay(transfer->delay_usecs);
+ }
+
+ message->status = status;
+ spi_finalize_current_message(master);
+
+ return status;
+}
+
+static int dspi_setup(struct spi_device *spi)
+{
+ struct chip_data *chip;
+ struct fsl_dspi *dspi = spi_master_get_devdata(spi->master);
+ u32 cs_sck_delay = 0, sck_cs_delay = 0;
+ unsigned char br = 0, pbr = 0, pcssck = 0, cssck = 0;
+ unsigned char pasc = 0, asc = 0, fmsz = 0;
+ unsigned long clkrate;
+
+ if ((spi->bits_per_word >= 4) && (spi->bits_per_word <= 16)) {
+ fmsz = spi->bits_per_word - 1;
+ } else {
+ pr_err("Invalid wordsize\n");
+ return -ENODEV;
+ }
+
+ /* Only alloc on first setup */
+ chip = spi_get_ctldata(spi);
+ if (chip == NULL) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+ }
+
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-cs-sck-delay",
+ &cs_sck_delay);
+
+ of_property_read_u32(spi->dev.of_node, "fsl,spi-sck-cs-delay",
+ &sck_cs_delay);
+
+ chip->mcr_val = SPI_MCR_MASTER | SPI_MCR_PCSIS |
+ SPI_MCR_CLR_TXF | SPI_MCR_CLR_RXF;
+
+ chip->void_write_data = 0;
+
+ clkrate = clk_get_rate(dspi->clk);
+ hz_to_spi_baud(&pbr, &br, spi->max_speed_hz, clkrate);
+
+ /* Set PCS to SCK delay scale values */
+ ns_delay_scale(&pcssck, &cssck, cs_sck_delay, clkrate);
+
+ /* Set After SCK delay scale values */
+ ns_delay_scale(&pasc, &asc, sck_cs_delay, clkrate);
+
+ chip->ctar_val = SPI_CTAR_FMSZ(fmsz)
+ | SPI_CTAR_CPOL(spi->mode & SPI_CPOL ? 1 : 0)
+ | SPI_CTAR_CPHA(spi->mode & SPI_CPHA ? 1 : 0)
+ | SPI_CTAR_LSBFE(spi->mode & SPI_LSB_FIRST ? 1 : 0)
+ | SPI_CTAR_PCSSCK(pcssck)
+ | SPI_CTAR_CSSCK(cssck)
+ | SPI_CTAR_PASC(pasc)
+ | SPI_CTAR_ASC(asc)
+ | SPI_CTAR_PBR(pbr)
+ | SPI_CTAR_BR(br);
+
+ spi_set_ctldata(spi, chip);
+
+ return 0;
+}
+
+static void dspi_cleanup(struct spi_device *spi)
+{
+	struct chip_data *chip = spi_get_ctldata(spi);
+
+ dev_dbg(&spi->dev, "spi_device %u.%u cleanup\n",
+ spi->master->bus_num, spi->chip_select);
+
+ kfree(chip);
+}
+
+static irqreturn_t dspi_interrupt(int irq, void *dev_id)
+{
+	struct fsl_dspi *dspi = dev_id;
+	struct spi_message *msg = dspi->cur_msg;
+
+ regmap_write(dspi->regmap, SPI_SR, SPI_SR_EOQF);
+ dspi_transfer_read(dspi);
+
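+	/*
+	 * If the transfer has fully drained, wake the submitting thread;
+	 * otherwise keep refilling the four-entry TX FIFO from here.
+	 */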
+ if (!dspi->len) {
+ if (dspi->dataflags & TRAN_STATE_WORD_ODD_NUM)
+ regmap_update_bits(dspi->regmap, SPI_CTAR(dspi->cs),
+ SPI_FRAME_BITS_MASK, SPI_FRAME_BITS(16));
+
+ dspi->waitflags = 1;
+ wake_up_interruptible(&dspi->waitq);
+	} else {
+		msg->actual_length += dspi_transfer_write(dspi);
+	}
+
+ return IRQ_HANDLED;
+}
+
+static const struct of_device_id fsl_dspi_dt_ids[] = {
+ { .compatible = "fsl,vf610-dspi", .data = NULL, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fsl_dspi_dt_ids);
+
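+/*
+ * Sketch of a device-tree node this driver binds against.  The unit address,
+ * reg/interrupt cells and clock phandle below are illustrative placeholders;
+ * the property names match what dspi_probe() and dspi_setup() parse.
+ *
+ *	spi0: dspi@4002c000 {
+ *		compatible = "fsl,vf610-dspi";
+ *		reg = <0x4002c000 0x1000>;
+ *		interrupts = <0 67 4>;
+ *		clocks = <&clks 130>;
+ *		clock-names = "dspi";
+ *		spi-num-chipselects = <5>;
+ *		bus-num = <0>;
+ *
+ *		device@0 {
+ *			...
+ *			fsl,spi-cs-sck-delay = <100>;
+ *			fsl,spi-sck-cs-delay = <50>;
+ *		};
+ *	};
+ */
+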
+#ifdef CONFIG_PM_SLEEP
+static int dspi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ spi_master_suspend(master);
+ clk_disable_unprepare(dspi->clk);
+
+ return 0;
+}
+
+static int dspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ clk_prepare_enable(dspi->clk);
+ spi_master_resume(master);
+
+ return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(dspi_pm, dspi_suspend, dspi_resume);
+
+static const struct regmap_config dspi_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = 0x88,
+};
+
+static int dspi_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_master *master;
+ struct fsl_dspi *dspi;
+ struct resource *res;
+ void __iomem *base;
+ int ret = 0, cs_num, bus_num;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi));
+ if (!master)
+ return -ENOMEM;
+
+ dspi = spi_master_get_devdata(master);
+ dspi->pdev = pdev;
+ dspi->master = master;
+
+ master->transfer = NULL;
+ master->setup = dspi_setup;
+ master->transfer_one_message = dspi_transfer_one_message;
+ master->dev.of_node = pdev->dev.of_node;
+
+ master->cleanup = dspi_cleanup;
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->bits_per_word_mask = SPI_BPW_MASK(4) | SPI_BPW_MASK(8) |
+ SPI_BPW_MASK(16);
+
+ ret = of_property_read_u32(np, "spi-num-chipselects", &cs_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get spi-num-chipselects\n");
+ goto out_master_put;
+ }
+ master->num_chipselect = cs_num;
+
+ ret = of_property_read_u32(np, "bus-num", &bus_num);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can't get bus-num\n");
+ goto out_master_put;
+ }
+ master->bus_num = bus_num;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out_master_put;
+ }
+
+ dspi->regmap = devm_regmap_init_mmio_clk(&pdev->dev, "dspi", base,
+ &dspi_regmap_config);
+ if (IS_ERR(dspi->regmap)) {
+ dev_err(&pdev->dev, "failed to init regmap: %ld\n",
+ PTR_ERR(dspi->regmap));
+		ret = PTR_ERR(dspi->regmap);
+		goto out_master_put;
+ }
+
+ dspi->irq = platform_get_irq(pdev, 0);
+ if (dspi->irq < 0) {
+ dev_err(&pdev->dev, "can't get platform irq\n");
+ ret = dspi->irq;
+ goto out_master_put;
+ }
+
+ ret = devm_request_irq(&pdev->dev, dspi->irq, dspi_interrupt, 0,
+ pdev->name, dspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Unable to attach DSPI interrupt\n");
+ goto out_master_put;
+ }
+
+ dspi->clk = devm_clk_get(&pdev->dev, "dspi");
+ if (IS_ERR(dspi->clk)) {
+ ret = PTR_ERR(dspi->clk);
+ dev_err(&pdev->dev, "unable to get clock\n");
+ goto out_master_put;
+ }
+ clk_prepare_enable(dspi->clk);
+
+ init_waitqueue_head(&dspi->waitq);
+ platform_set_drvdata(pdev, master);
+
+ ret = spi_register_master(master);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Problem registering DSPI master\n");
+ goto out_clk_put;
+ }
+
+ return ret;
+
+out_clk_put:
+ clk_disable_unprepare(dspi->clk);
+out_master_put:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int dspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct fsl_dspi *dspi = spi_master_get_devdata(master);
+
+ /* Disconnect from the SPI framework */
+ clk_disable_unprepare(dspi->clk);
+ spi_unregister_master(dspi->master);
+ spi_master_put(dspi->master);
+
+ return 0;
+}
+
+static struct platform_driver fsl_dspi_driver = {
+ .driver.name = DRIVER_NAME,
+ .driver.of_match_table = fsl_dspi_dt_ids,
+ .driver.owner = THIS_MODULE,
+ .driver.pm = &dspi_pm,
+ .probe = dspi_probe,
+ .remove = dspi_remove,
+};
+module_platform_driver(fsl_dspi_driver);
+
+MODULE_DESCRIPTION("Freescale DSPI Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
new file mode 100644
index 000000000..80d245ac8
--- /dev/null
+++ b/drivers/spi/spi-fsl-espi.c
@@ -0,0 +1,880 @@
+/*
+ * Freescale eSPI controller driver.
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/fsl_devices.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <sysdev/fsl_soc.h>
+
+#include "spi-fsl-lib.h"
+
+/* eSPI Controller registers */
+struct fsl_espi_reg {
+ __be32 mode; /* 0x000 - eSPI mode register */
+ __be32 event; /* 0x004 - eSPI event register */
+ __be32 mask; /* 0x008 - eSPI mask register */
+ __be32 command; /* 0x00c - eSPI command register */
+	__be32 transmit;	/* 0x010 - eSPI transmit FIFO access register */
+	__be32 receive;		/* 0x014 - eSPI receive FIFO access register */
+ u8 res[8]; /* 0x018 - 0x01c reserved */
+ __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */
+};
+
+struct fsl_espi_transfer {
+ const void *tx_buf;
+ void *rx_buf;
+ unsigned len;
+ unsigned n_tx;
+ unsigned n_rx;
+ unsigned actual_length;
+ int status;
+};
+
+/* eSPI Controller mode register definitions */
+#define SPMODE_ENABLE (1 << 31)
+#define SPMODE_LOOP (1 << 30)
+#define SPMODE_TXTHR(x) ((x) << 8)
+#define SPMODE_RXTHR(x) ((x) << 0)
+
+/* eSPI Controller CS mode register definitions */
+#define CSMODE_CI_INACTIVEHIGH (1 << 31)
+#define CSMODE_CP_BEGIN_EDGECLK (1 << 30)
+#define CSMODE_REV (1 << 29)
+#define CSMODE_DIV16 (1 << 28)
+#define CSMODE_PM(x) ((x) << 24)
+#define CSMODE_POL_1 (1 << 20)
+#define CSMODE_LEN(x) ((x) << 16)
+#define CSMODE_BEF(x) ((x) << 12)
+#define CSMODE_AFT(x) ((x) << 8)
+#define CSMODE_CG(x) ((x) << 3)
+
+/* Default mode/csmode for eSPI controller */
+#define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | SPMODE_RXTHR(3))
+#define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \
+ | CSMODE_AFT(0) | CSMODE_CG(1))
+
+/* SPIE register values */
+#define SPIE_NE 0x00000200 /* Not empty */
+#define SPIE_NF 0x00000100 /* Not full */
+
+/* SPIM register values */
+#define SPIM_NE 0x00000200 /* Not empty */
+#define SPIM_NF 0x00000100 /* Not full */
+#define	SPIE_RXCNT(reg)     (((reg) >> 24) & 0x3F)
+#define	SPIE_TXCNT(reg)     (((reg) >> 16) & 0x3F)
+
+/* SPCOM register values */
+#define SPCOM_CS(x) ((x) << 30)
+#define SPCOM_TRANLEN(x) ((x) << 0)
+#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */
+
+static void fsl_espi_change_mode(struct spi_device *spi)
+{
+ struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
+ struct spi_mpc8xxx_cs *cs = spi->controller_state;
+ struct fsl_espi_reg *reg_base = mspi->reg_base;
+ __be32 __iomem *mode = &reg_base->csmode[spi->chip_select];
+ __be32 __iomem *espi_mode = &reg_base->mode;
+ u32 tmp;
+ unsigned long flags;
+
+ /* Turn off IRQs locally to minimize time that SPI is disabled. */
+ local_irq_save(flags);
+
+ /* Turn off SPI unit prior changing mode */
+ tmp = mpc8xxx_spi_read_reg(espi_mode);
+ mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE);
+ mpc8xxx_spi_write_reg(mode, cs->hw_mode);
+ mpc8xxx_spi_write_reg(espi_mode, tmp);
+
+ local_irq_restore(flags);
+}
+
+static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi)
+{
+ u32 data;
+ u16 data_h;
+ u16 data_l;
+ const u32 *tx = mpc8xxx_spi->tx;
+
+ if (!tx)
+ return 0;
+
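+	/* byte-swap each 16-bit half so the word is transmitted LSB first */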
+ data = *tx++ << mpc8xxx_spi->tx_shift;
+ data_l = data & 0xffff;
+ data_h = (data >> 16) & 0xffff;
+ swab16s(&data_l);
+ swab16s(&data_h);
+ data = data_h | data_l;
+
+ mpc8xxx_spi->tx = tx;
+ return data;
+}
+
+static int fsl_espi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ int bits_per_word = 0;
+ u8 pm;
+ u32 hz = 0;
+ struct spi_mpc8xxx_cs *cs = spi->controller_state;
+
+ if (t) {
+ bits_per_word = t->bits_per_word;
+ hz = t->speed_hz;
+ }
+
+	/* fall back to the spi_device defaults if the transfer leaves them unset */
+ if (!bits_per_word)
+ bits_per_word = spi->bits_per_word;
+
+ if (!hz)
+ hz = spi->max_speed_hz;
+
+ cs->rx_shift = 0;
+ cs->tx_shift = 0;
+ cs->get_rx = mpc8xxx_spi_rx_buf_u32;
+ cs->get_tx = mpc8xxx_spi_tx_buf_u32;
+ if (bits_per_word <= 8) {
+ cs->rx_shift = 8 - bits_per_word;
+ } else {
+ cs->rx_shift = 16 - bits_per_word;
+ if (spi->mode & SPI_LSB_FIRST)
+ cs->get_tx = fsl_espi_tx_buf_lsb;
+ }
+
+ mpc8xxx_spi->rx_shift = cs->rx_shift;
+ mpc8xxx_spi->tx_shift = cs->tx_shift;
+ mpc8xxx_spi->get_rx = cs->get_rx;
+ mpc8xxx_spi->get_tx = cs->get_tx;
+
+ bits_per_word = bits_per_word - 1;
+
+ /* mask out bits we are going to set */
+ cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF));
+
+ cs->hw_mode |= CSMODE_LEN(bits_per_word);
+
+ if ((mpc8xxx_spi->spibrg / hz) > 64) {
+ cs->hw_mode |= CSMODE_DIV16;
+ pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 16 * 4);
+
+		WARN_ONCE(pm > 33,
+			  "%s: Requested speed is too low: %d Hz. Will use %d Hz instead.\n",
+			  dev_name(&spi->dev), hz,
+			  mpc8xxx_spi->spibrg / (4 * 16 * (32 + 1)));
+ if (pm > 33)
+ pm = 33;
+ } else {
+ pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 4);
+ }
+ if (pm)
+ pm--;
+ if (pm < 2)
+ pm = 2;
+
+ cs->hw_mode |= CSMODE_PM(pm);
+
+ fsl_espi_change_mode(spi);
+ return 0;
+}
+
+static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t,
+ unsigned int len)
+{
+ u32 word;
+ struct fsl_espi_reg *reg_base = mspi->reg_base;
+
+ mspi->count = len;
+
+ /* enable rx ints */
+ mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);
+
+ /* transmit word */
+ word = mspi->get_tx(mspi);
+ mpc8xxx_spi_write_reg(&reg_base->transmit, word);
+
+ return 0;
+}
+
+static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base;
+ unsigned int len = t->len;
+ int ret;
+
+ mpc8xxx_spi->len = t->len;
+ len = roundup(len, 4) / 4;
+
+ mpc8xxx_spi->tx = t->tx_buf;
+ mpc8xxx_spi->rx = t->rx_buf;
+
+ reinit_completion(&mpc8xxx_spi->done);
+
+ /* Set SPCOM[CS] and SPCOM[TRANLEN] field */
+ if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
+		dev_err(mpc8xxx_spi->dev,
+			"Transaction length (%d) beyond the SPCOM[TRANLEN] field\n",
+			t->len);
+ return -EINVAL;
+ }
+ mpc8xxx_spi_write_reg(&reg_base->command,
+ (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1)));
+
+ ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&mpc8xxx_spi->done);
+
+ /* disable rx ints */
+ mpc8xxx_spi_write_reg(&reg_base->mask, 0);
+
+ return mpc8xxx_spi->count;
+}
+
+static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd)
+{
+ if (cmd) {
+ cmd[1] = (u8)(addr >> 16);
+ cmd[2] = (u8)(addr >> 8);
+ cmd[3] = (u8)(addr >> 0);
+ }
+}
+
+static inline unsigned int fsl_espi_cmd2addr(u8 *cmd)
+{
+ if (cmd)
+ return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0;
+
+ return 0;
+}
+
+static void fsl_espi_do_trans(struct spi_message *m,
+ struct fsl_espi_transfer *tr)
+{
+ struct spi_device *spi = m->spi;
+ struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
+ struct fsl_espi_transfer *espi_trans = tr;
+ struct spi_message message;
+ struct spi_transfer *t, *first, trans;
+ int status = 0;
+
+ spi_message_init(&message);
+ memset(&trans, 0, sizeof(trans));
+
+ first = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if ((first->bits_per_word != t->bits_per_word) ||
+ (first->speed_hz != t->speed_hz)) {
+ espi_trans->status = -EINVAL;
+ dev_err(mspi->dev,
+				"bits_per_word/speed_hz must be the same for all transfers in a message\n");
+ return;
+ }
+
+ trans.speed_hz = t->speed_hz;
+ trans.bits_per_word = t->bits_per_word;
+ trans.delay_usecs = max(first->delay_usecs, t->delay_usecs);
+ }
+
+ trans.len = espi_trans->len;
+ trans.tx_buf = espi_trans->tx_buf;
+ trans.rx_buf = espi_trans->rx_buf;
+ spi_message_add_tail(&trans, &message);
+
+ list_for_each_entry(t, &message.transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
+ status = fsl_espi_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ }
+
+ if (t->len)
+ status = fsl_espi_bufs(spi, t);
+
+ if (status) {
+ status = -EMSGSIZE;
+ break;
+ }
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+ }
+
+ espi_trans->status = status;
+ fsl_espi_setup_transfer(spi, NULL);
+}
+
+static void fsl_espi_cmd_trans(struct spi_message *m,
+ struct fsl_espi_transfer *trans, u8 *rx_buff)
+{
+ struct spi_transfer *t;
+ u8 *local_buf;
+ int i = 0;
+ struct fsl_espi_transfer *espi_trans = trans;
+
+ local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
+ if (!local_buf) {
+ espi_trans->status = -ENOMEM;
+ return;
+ }
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf) {
+ memcpy(local_buf + i, t->tx_buf, t->len);
+ i += t->len;
+ }
+ }
+
+ espi_trans->tx_buf = local_buf;
+ espi_trans->rx_buf = local_buf;
+ fsl_espi_do_trans(m, espi_trans);
+
+ espi_trans->actual_length = espi_trans->len;
+ kfree(local_buf);
+}
+
+static void fsl_espi_rw_trans(struct spi_message *m,
+ struct fsl_espi_transfer *trans, u8 *rx_buff)
+{
+ struct fsl_espi_transfer *espi_trans = trans;
+ unsigned int total_len = espi_trans->len;
+ struct spi_transfer *t;
+ u8 *local_buf;
+ u8 *rx_buf = rx_buff;
+ unsigned int trans_len;
+ unsigned int addr;
+ unsigned int tx_only;
+ unsigned int rx_pos = 0;
+ unsigned int pos;
+ int i, loop;
+
+ local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
+ if (!local_buf) {
+ espi_trans->status = -ENOMEM;
+ return;
+ }
+
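+	/*
+	 * Split the message into SPCOM_TRANLEN_MAX-sized chunks.  Each chunk
+	 * after the first resends the command/address bytes (tx_only), so the
+	 * device address embedded in the command is advanced by rx_pos.
+	 */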
+ for (pos = 0, loop = 0; pos < total_len; pos += trans_len, loop++) {
+ trans_len = total_len - pos;
+
+ i = 0;
+ tx_only = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf) {
+ memcpy(local_buf + i, t->tx_buf, t->len);
+ i += t->len;
+ if (!t->rx_buf)
+ tx_only += t->len;
+ }
+ }
+
+		/* account for the command/address bytes resent in each chunk */
+ if (loop > 0)
+ trans_len += tx_only;
+
+ if (trans_len > SPCOM_TRANLEN_MAX)
+ trans_len = SPCOM_TRANLEN_MAX;
+
+ /* Update device offset */
+ if (pos > 0) {
+ addr = fsl_espi_cmd2addr(local_buf);
+ addr += rx_pos;
+ fsl_espi_addr2cmd(addr, local_buf);
+ }
+
+ espi_trans->len = trans_len;
+ espi_trans->tx_buf = local_buf;
+ espi_trans->rx_buf = local_buf;
+ fsl_espi_do_trans(m, espi_trans);
+
+ /* If there is at least one RX byte then copy it to rx_buf */
+ if (tx_only < SPCOM_TRANLEN_MAX)
+ memcpy(rx_buf + rx_pos, espi_trans->rx_buf + tx_only,
+ trans_len - tx_only);
+
+ rx_pos += trans_len - tx_only;
+
+ if (loop > 0)
+ espi_trans->actual_length += espi_trans->len - tx_only;
+ else
+ espi_trans->actual_length += espi_trans->len;
+ }
+
+ kfree(local_buf);
+}
+
+static int fsl_espi_do_one_msg(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct spi_transfer *t;
+ u8 *rx_buf = NULL;
+ unsigned int n_tx = 0;
+ unsigned int n_rx = 0;
+ unsigned int xfer_len = 0;
+ struct fsl_espi_transfer espi_trans;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf)
+ n_tx += t->len;
+ if (t->rx_buf) {
+ n_rx += t->len;
+ rx_buf = t->rx_buf;
+ }
+ if ((t->tx_buf) || (t->rx_buf))
+ xfer_len += t->len;
+ }
+
+ espi_trans.n_tx = n_tx;
+ espi_trans.n_rx = n_rx;
+ espi_trans.len = xfer_len;
+ espi_trans.actual_length = 0;
+ espi_trans.status = 0;
+
+ if (!rx_buf)
+ fsl_espi_cmd_trans(m, &espi_trans, NULL);
+ else
+ fsl_espi_rw_trans(m, &espi_trans, rx_buf);
+
+ m->actual_length = espi_trans.actual_length;
+ m->status = espi_trans.status;
+ spi_finalize_current_message(master);
+ return 0;
+}
+
+static int fsl_espi_setup(struct spi_device *spi)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ int retval;
+ u32 hw_mode;
+ u32 loop_mode;
+ struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ if (!cs) {
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi_set_ctldata(spi, cs);
+ }
+
+ mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ hw_mode = cs->hw_mode; /* Save original settings */
+ cs->hw_mode = mpc8xxx_spi_read_reg(
+ &reg_base->csmode[spi->chip_select]);
+ /* mask out bits we are going to set */
+ cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH
+ | CSMODE_REV);
+
+ if (spi->mode & SPI_CPHA)
+ cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK;
+ if (spi->mode & SPI_CPOL)
+ cs->hw_mode |= CSMODE_CI_INACTIVEHIGH;
+ if (!(spi->mode & SPI_LSB_FIRST))
+ cs->hw_mode |= CSMODE_REV;
+
+ /* Handle the loop mode */
+ loop_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
+ loop_mode &= ~SPMODE_LOOP;
+ if (spi->mode & SPI_LOOP)
+ loop_mode |= SPMODE_LOOP;
+ mpc8xxx_spi_write_reg(&reg_base->mode, loop_mode);
+
+ retval = fsl_espi_setup_transfer(spi, NULL);
+ if (retval < 0) {
+ cs->hw_mode = hw_mode; /* Restore settings */
+ return retval;
+ }
+ return 0;
+}
+
+static void fsl_espi_cleanup(struct spi_device *spi)
+{
+ struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+
+ kfree(cs);
+ spi_set_ctldata(spi, NULL);
+}
+
+void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
+{
+ struct fsl_espi_reg *reg_base = mspi->reg_base;
+
+	/* We need to handle RX first */
+ if (events & SPIE_NE) {
+ u32 rx_data, tmp;
+ u8 rx_data_8;
+
+ /* Spin until RX is done */
+ while (SPIE_RXCNT(events) < min(4, mspi->len)) {
+ cpu_relax();
+ events = mpc8xxx_spi_read_reg(&reg_base->event);
+ }
+
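+		/*
+		 * A full 32-bit word can be popped in one read; a shorter
+		 * tail is assembled byte by byte and left-justified so that
+		 * get_rx() sees it in the expected position.
+		 */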
+ if (mspi->len >= 4) {
+ rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
+ } else {
+ tmp = mspi->len;
+ rx_data = 0;
+ while (tmp--) {
+ rx_data_8 = in_8((u8 *)&reg_base->receive);
+ rx_data |= (rx_data_8 << (tmp * 8));
+ }
+
+ rx_data <<= (4 - mspi->len) * 8;
+ }
+
+ mspi->len -= 4;
+
+ if (mspi->rx)
+ mspi->get_rx(rx_data, mspi);
+ }
+
+ if (!(events & SPIE_NF)) {
+ int ret;
+
+ /* spin until TX is done */
+ ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg(
+ &reg_base->event)) & SPIE_NF) == 0, 1000, 0);
+ if (!ret) {
+			dev_err(mspi->dev, "timed out waiting for SPIE_NF\n");
+ return;
+ }
+ }
+
+ /* Clear the events */
+ mpc8xxx_spi_write_reg(&reg_base->event, events);
+
+ mspi->count -= 1;
+ if (mspi->count) {
+ u32 word = mspi->get_tx(mspi);
+
+ mpc8xxx_spi_write_reg(&reg_base->transmit, word);
+ } else {
+ complete(&mspi->done);
+ }
+}
+
+static irqreturn_t fsl_espi_irq(s32 irq, void *context_data)
+{
+ struct mpc8xxx_spi *mspi = context_data;
+ struct fsl_espi_reg *reg_base = mspi->reg_base;
+ irqreturn_t ret = IRQ_NONE;
+ u32 events;
+
+ /* Get interrupt events(tx/rx) */
+ events = mpc8xxx_spi_read_reg(&reg_base->event);
+ if (events)
+ ret = IRQ_HANDLED;
+
+ dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events);
+
+ fsl_espi_cpu_irq(mspi, events);
+
+ return ret;
+}
+
+static void fsl_espi_remove(struct mpc8xxx_spi *mspi)
+{
+ iounmap(mspi->reg_base);
+}
+
+static int fsl_espi_suspend(struct spi_master *master)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ u32 regval;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ regval = mpc8xxx_spi_read_reg(&reg_base->mode);
+ regval &= ~SPMODE_ENABLE;
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ return 0;
+}
+
+static int fsl_espi_resume(struct spi_master *master)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ u32 regval;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ regval = mpc8xxx_spi_read_reg(&reg_base->mode);
+ regval |= SPMODE_ENABLE;
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ return 0;
+}
+
+static struct spi_master *fsl_espi_probe(struct device *dev,
+ struct resource *mem, unsigned int irq)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_master *master;
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ struct device_node *nc;
+ const __be32 *prop;
+ u32 regval, csmode;
+ int i, len, ret = 0;
+
+ master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
+ if (!master) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev_set_drvdata(dev, master);
+
+ mpc8xxx_spi_probe(dev, mem, irq);
+
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ master->setup = fsl_espi_setup;
+ master->cleanup = fsl_espi_cleanup;
+ master->transfer_one_message = fsl_espi_do_one_msg;
+ master->prepare_transfer_hardware = fsl_espi_resume;
+ master->unprepare_transfer_hardware = fsl_espi_suspend;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ mpc8xxx_spi->spi_remove = fsl_espi_remove;
+
+ mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
+ if (!mpc8xxx_spi->reg_base) {
+ ret = -ENOMEM;
+ goto err_probe;
+ }
+
+ reg_base = mpc8xxx_spi->reg_base;
+
+ /* Register for SPI Interrupt */
+ ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq,
+ 0, "fsl_espi", mpc8xxx_spi);
+ if (ret)
+		goto err_unmap;
+
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) {
+ mpc8xxx_spi->rx_shift = 16;
+ mpc8xxx_spi->tx_shift = 24;
+ }
+
+ /* SPI controller initializations */
+ mpc8xxx_spi_write_reg(&reg_base->mode, 0);
+ mpc8xxx_spi_write_reg(&reg_base->mask, 0);
+ mpc8xxx_spi_write_reg(&reg_base->command, 0);
+ mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
+
+ /* Init eSPI CS mode register */
+ for_each_available_child_of_node(master->dev.of_node, nc) {
+ /* get chip select */
+ prop = of_get_property(nc, "reg", &len);
+ if (!prop || len < sizeof(*prop))
+ continue;
+ i = be32_to_cpup(prop);
+ if (i < 0 || i >= pdata->max_chipselect)
+ continue;
+
+ csmode = CSMODE_INIT_VAL;
+ /* check if CSBEF is set in device tree */
+ prop = of_get_property(nc, "fsl,csbef", &len);
+ if (prop && len >= sizeof(*prop)) {
+ csmode &= ~(CSMODE_BEF(0xf));
+ csmode |= CSMODE_BEF(be32_to_cpup(prop));
+ }
+ /* check if CSAFT is set in device tree */
+ prop = of_get_property(nc, "fsl,csaft", &len);
+ if (prop && len >= sizeof(*prop)) {
+ csmode &= ~(CSMODE_AFT(0xf));
+ csmode |= CSMODE_AFT(be32_to_cpup(prop));
+ }
+ mpc8xxx_spi_write_reg(&reg_base->csmode[i], csmode);
+
+ dev_info(dev, "cs=%d, init_csmode=0x%x\n", i, csmode);
+ }
+
+ /* Enable SPI interface */
+ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ ret = spi_register_master(master);
+ if (ret < 0)
+		goto err_free_irq;
+
+ dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq);
+
+ return master;
+
+err_free_irq:
+ free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
+err_unmap:
+ iounmap(mpc8xxx_spi->reg_base);
+err_probe:
+ spi_master_put(master);
+err:
+ return ERR_PTR(ret);
+}
+
+static int of_fsl_espi_get_chipselects(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ const u32 *prop;
+ int len;
+
+ prop = of_get_property(np, "fsl,espi-num-chipselects", &len);
+ if (!prop || len < sizeof(*prop)) {
+ dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
+ return -EINVAL;
+ }
+
+ pdata->max_chipselect = *prop;
+ pdata->cs_control = NULL;
+
+ return 0;
+}
+
+static int of_fsl_espi_probe(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct spi_master *master;
+ struct resource mem;
+ unsigned int irq;
+	int ret;
+
+ ret = of_mpc8xxx_spi_probe(ofdev);
+ if (ret)
+ return ret;
+
+ ret = of_fsl_espi_get_chipselects(dev);
+ if (ret)
+ goto err;
+
+ ret = of_address_to_resource(np, 0, &mem);
+ if (ret)
+ goto err;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ master = fsl_espi_probe(dev, &mem, irq);
+ if (IS_ERR(master)) {
+ ret = PTR_ERR(master);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ return ret;
+}
+
+static int of_fsl_espi_remove(struct platform_device *dev)
+{
+ return mpc8xxx_spi_remove(&dev->dev);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int of_fsl_espi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
+ }
+
+ return fsl_espi_suspend(master);
+}
+
+static int of_fsl_espi_resume(struct device *dev)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_espi_reg *reg_base;
+ u32 regval;
+ int i;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ reg_base = mpc8xxx_spi->reg_base;
+
+ /* SPI controller initializations */
+ mpc8xxx_spi_write_reg(&reg_base->mode, 0);
+ mpc8xxx_spi_write_reg(&reg_base->mask, 0);
+ mpc8xxx_spi_write_reg(&reg_base->command, 0);
+ mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
+
+ /* Init eSPI CS mode register */
+ for (i = 0; i < pdata->max_chipselect; i++)
+ mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL);
+
+ /* Enable SPI interface */
+ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops espi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(of_fsl_espi_suspend, of_fsl_espi_resume)
+};
+
+static const struct of_device_id of_fsl_espi_match[] = {
+ { .compatible = "fsl,mpc8536-espi" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_fsl_espi_match);
+
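+/*
+ * Illustrative (not authoritative) device-tree snippet for this driver.  The
+ * reg and interrupt values are placeholders; the property names match the
+ * probe and CS-mode initialization above.
+ *
+ *	spi@7000 {
+ *		compatible = "fsl,mpc8536-espi";
+ *		reg = <0x7000 0x1000>;
+ *		interrupts = <59 0x2>;
+ *		fsl,espi-num-chipselects = <4>;
+ *
+ *		flash@0 {
+ *			...
+ *			fsl,csbef = <1>;
+ *			fsl,csaft = <1>;
+ *		};
+ *	};
+ */
+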
+static struct platform_driver fsl_espi_driver = {
+ .driver = {
+ .name = "fsl_espi",
+ .of_match_table = of_fsl_espi_match,
+ .pm = &espi_pm,
+ },
+ .probe = of_fsl_espi_probe,
+ .remove = of_fsl_espi_remove,
+};
+module_platform_driver(fsl_espi_driver);
+
+MODULE_AUTHOR("Mingkai Hu");
+MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-lib.c b/drivers/spi/spi-fsl-lib.c
new file mode 100644
index 000000000..cb35d2f0d
--- /dev/null
+++ b/drivers/spi/spi-fsl-lib.c
@@ -0,0 +1,183 @@
+/*
+ * Freescale SPI/eSPI controller driver library.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/spi/spi.h>
+#ifdef CONFIG_FSL_SOC
+#include <sysdev/fsl_soc.h>
+#endif
+
+#include "spi-fsl-lib.h"
+
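+/*
+ * The macros below stamp out mpc8xxx_spi_rx_buf_{u8,u16,u32}() and
+ * mpc8xxx_spi_tx_buf_{u8,u16,u32}(): each moves one FIFO word to or from
+ * the current buffer position, applying the mode-dependent bit shift.
+ */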
+#define MPC8XXX_SPI_RX_BUF(type) \
+void mpc8xxx_spi_rx_buf_##type(u32 data, struct mpc8xxx_spi *mpc8xxx_spi) \
+{ \
+ type *rx = mpc8xxx_spi->rx; \
+ *rx++ = (type)(data >> mpc8xxx_spi->rx_shift); \
+ mpc8xxx_spi->rx = rx; \
+} \
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_rx_buf_##type);
+
+#define MPC8XXX_SPI_TX_BUF(type) \
+u32 mpc8xxx_spi_tx_buf_##type(struct mpc8xxx_spi *mpc8xxx_spi) \
+{ \
+ u32 data; \
+ const type *tx = mpc8xxx_spi->tx; \
+ if (!tx) \
+ return 0; \
+ data = *tx++ << mpc8xxx_spi->tx_shift; \
+ mpc8xxx_spi->tx = tx; \
+ return data; \
+} \
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_tx_buf_##type);
+
+MPC8XXX_SPI_RX_BUF(u8)
+MPC8XXX_SPI_RX_BUF(u16)
+MPC8XXX_SPI_RX_BUF(u32)
+MPC8XXX_SPI_TX_BUF(u8)
+MPC8XXX_SPI_TX_BUF(u16)
+MPC8XXX_SPI_TX_BUF(u32)
+
+struct mpc8xxx_spi_probe_info *to_of_pinfo(struct fsl_spi_platform_data *pdata)
+{
+ return container_of(pdata, struct mpc8xxx_spi_probe_info, pdata);
+}
+EXPORT_SYMBOL_GPL(to_of_pinfo);
+
+const char *mpc8xxx_spi_strmode(unsigned int flags)
+{
+ if (flags & SPI_QE_CPU_MODE) {
+ return "QE CPU";
+ } else if (flags & SPI_CPM_MODE) {
+ if (flags & SPI_QE)
+ return "QE";
+ else if (flags & SPI_CPM2)
+ return "CPM2";
+ else
+ return "CPM1";
+ }
+ return "CPU";
+}
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_strmode);
+
+void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
+ unsigned int irq)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_master *master;
+ struct mpc8xxx_spi *mpc8xxx_spi;
+
+ master = dev_get_drvdata(dev);
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH
+ | SPI_LSB_FIRST | SPI_LOOP;
+
+ master->dev.of_node = dev->of_node;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ mpc8xxx_spi->dev = dev;
+ mpc8xxx_spi->get_rx = mpc8xxx_spi_rx_buf_u8;
+ mpc8xxx_spi->get_tx = mpc8xxx_spi_tx_buf_u8;
+ mpc8xxx_spi->flags = pdata->flags;
+ mpc8xxx_spi->spibrg = pdata->sysclk;
+ mpc8xxx_spi->irq = irq;
+
+ mpc8xxx_spi->rx_shift = 0;
+ mpc8xxx_spi->tx_shift = 0;
+
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->max_chipselect;
+
+ init_completion(&mpc8xxx_spi->done);
+}
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_probe);
+
+int mpc8xxx_spi_remove(struct device *dev)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct spi_master *master;
+
+ master = dev_get_drvdata(dev);
+ mpc8xxx_spi = spi_master_get_devdata(master);
+
+ spi_unregister_master(master);
+
+ free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
+
+ if (mpc8xxx_spi->spi_remove)
+ mpc8xxx_spi->spi_remove(mpc8xxx_spi);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(mpc8xxx_spi_remove);
+
+int of_mpc8xxx_spi_probe(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct mpc8xxx_spi_probe_info *pinfo;
+ struct fsl_spi_platform_data *pdata;
+ const void *prop;
+ int ret = -ENOMEM;
+
+ pinfo = devm_kzalloc(&ofdev->dev, sizeof(*pinfo), GFP_KERNEL);
+ if (!pinfo)
+ return ret;
+
+ pdata = &pinfo->pdata;
+ dev->platform_data = pdata;
+
+ /* Allocate bus num dynamically. */
+ pdata->bus_num = -1;
+
+#ifdef CONFIG_FSL_SOC
+	/* The SPI controller is clocked from either the QE or the SoC clock. */
+ pdata->sysclk = get_brgfreq();
+ if (pdata->sysclk == -1) {
+ pdata->sysclk = fsl_get_sys_freq();
+ if (pdata->sysclk == -1)
+ return -ENODEV;
+ }
+#else
+ ret = of_property_read_u32(np, "clock-frequency", &pdata->sysclk);
+ if (ret)
+ return ret;
+#endif
+
+ prop = of_get_property(np, "mode", NULL);
+ if (prop && !strcmp(prop, "cpu-qe"))
+ pdata->flags = SPI_QE_CPU_MODE;
+ else if (prop && !strcmp(prop, "qe"))
+ pdata->flags = SPI_CPM_MODE | SPI_QE;
+ else if (of_device_is_compatible(np, "fsl,cpm2-spi"))
+ pdata->flags = SPI_CPM_MODE | SPI_CPM2;
+ else if (of_device_is_compatible(np, "fsl,cpm1-spi"))
+ pdata->flags = SPI_CPM_MODE | SPI_CPM1;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(of_mpc8xxx_spi_probe);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-lib.h b/drivers/spi/spi-fsl-lib.h
new file mode 100644
index 000000000..1326a392a
--- /dev/null
+++ b/drivers/spi/spi-fsl-lib.h
@@ -0,0 +1,125 @@
+/*
+ * Freescale SPI/eSPI controller driver library.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ * Copyright (C) 2006 Polycom, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#ifndef __SPI_FSL_LIB_H__
+#define __SPI_FSL_LIB_H__
+
+#include <asm/io.h>
+
+/* SPI/eSPI Controller driver's private data. */
+struct mpc8xxx_spi {
+ struct device *dev;
+ void *reg_base;
+
+ /* rx & tx bufs from the spi_transfer */
+ const void *tx;
+ void *rx;
+#if IS_ENABLED(CONFIG_SPI_FSL_ESPI)
+ int len;
+#endif
+
+ int subblock;
+ struct spi_pram __iomem *pram;
+#ifdef CONFIG_FSL_SOC
+ struct cpm_buf_desc __iomem *tx_bd;
+ struct cpm_buf_desc __iomem *rx_bd;
+#endif
+
+ struct spi_transfer *xfer_in_progress;
+
+ /* dma addresses for CPM transfers */
+ dma_addr_t tx_dma;
+ dma_addr_t rx_dma;
+ bool map_tx_dma;
+ bool map_rx_dma;
+
+ dma_addr_t dma_dummy_tx;
+ dma_addr_t dma_dummy_rx;
+
+ /* functions to deal with different sized buffers */
+ void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
+ u32(*get_tx) (struct mpc8xxx_spi *);
+
+ /* hooks for different controller driver */
+ void (*spi_remove) (struct mpc8xxx_spi *mspi);
+
+ unsigned int count;
+ unsigned int irq;
+
+ unsigned nsecs; /* (clock cycle time)/2 */
+
+ u32 spibrg; /* SPIBRG input clock */
+ u32 rx_shift; /* RX data reg shift when in qe mode */
+ u32 tx_shift; /* TX data reg shift when in qe mode */
+
+ unsigned int flags;
+
+#if IS_ENABLED(CONFIG_SPI_FSL_SPI)
+ int type;
+ int native_chipselects;
+ u8 max_bits_per_word;
+
+ void (*set_shifts)(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first);
+#endif
+
+ struct completion done;
+};
+
+struct spi_mpc8xxx_cs {
+ /* functions to deal with different sized buffers */
+ void (*get_rx) (u32 rx_data, struct mpc8xxx_spi *);
+ u32 (*get_tx) (struct mpc8xxx_spi *);
+ u32 rx_shift; /* RX data reg shift when in qe mode */
+ u32 tx_shift; /* TX data reg shift when in qe mode */
+ u32 hw_mode; /* Holds HW mode register settings */
+};
+
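+/*
+ * The register block is big-endian on all supported parts (callers pass
+ * __be32 __iomem pointers), hence the ioread32be/iowrite32be accessors.
+ */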
+static inline void mpc8xxx_spi_write_reg(__be32 __iomem *reg, u32 val)
+{
+ iowrite32be(val, reg);
+}
+
+static inline u32 mpc8xxx_spi_read_reg(__be32 __iomem *reg)
+{
+ return ioread32be(reg);
+}
+
+struct mpc8xxx_spi_probe_info {
+ struct fsl_spi_platform_data pdata;
+ int *gpios;
+ bool *alow_flags;
+};
+
+extern u32 mpc8xxx_spi_tx_buf_u8(struct mpc8xxx_spi *mpc8xxx_spi);
+extern u32 mpc8xxx_spi_tx_buf_u16(struct mpc8xxx_spi *mpc8xxx_spi);
+extern u32 mpc8xxx_spi_tx_buf_u32(struct mpc8xxx_spi *mpc8xxx_spi);
+extern void mpc8xxx_spi_rx_buf_u8(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
+extern void mpc8xxx_spi_rx_buf_u16(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
+extern void mpc8xxx_spi_rx_buf_u32(u32 data, struct mpc8xxx_spi *mpc8xxx_spi);
+
+extern struct mpc8xxx_spi_probe_info *to_of_pinfo(
+ struct fsl_spi_platform_data *pdata);
+extern int mpc8xxx_spi_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, unsigned int len);
+extern const char *mpc8xxx_spi_strmode(unsigned int flags);
+extern void mpc8xxx_spi_probe(struct device *dev, struct resource *mem,
+ unsigned int irq);
+extern int mpc8xxx_spi_remove(struct device *dev);
+extern int of_mpc8xxx_spi_probe(struct platform_device *ofdev);
+
+#endif /* __SPI_FSL_LIB_H__ */
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
new file mode 100644
index 000000000..60c590790
--- /dev/null
+++ b/drivers/spi/spi-fsl-spi.c
@@ -0,0 +1,965 @@
+/*
+ * Freescale SPI controller driver.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * GRLIB support:
+ * Copyright (c) 2012 Aeroflex Gaisler AB.
+ * Author: Andreas Larsson <andreas@gaisler.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/fsl_devices.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/types.h>
+
+#include "spi-fsl-lib.h"
+#include "spi-fsl-cpm.h"
+#include "spi-fsl-spi.h"
+
+#define TYPE_FSL 0
+#define TYPE_GRLIB 1
+
+struct fsl_spi_match_data {
+ int type;
+};
+
+static struct fsl_spi_match_data of_fsl_spi_fsl_config = {
+ .type = TYPE_FSL,
+};
+
+static struct fsl_spi_match_data of_fsl_spi_grlib_config = {
+ .type = TYPE_GRLIB,
+};
+
+static const struct of_device_id of_fsl_spi_match[] = {
+ {
+ .compatible = "fsl,spi",
+ .data = &of_fsl_spi_fsl_config,
+ },
+ {
+ .compatible = "aeroflexgaisler,spictrl",
+ .data = &of_fsl_spi_grlib_config,
+ },
+ {}
+};
+MODULE_DEVICE_TABLE(of, of_fsl_spi_match);
+
+static int fsl_spi_get_type(struct device *dev)
+{
+ const struct of_device_id *match;
+
+ if (dev->of_node) {
+ match = of_match_node(of_fsl_spi_match, dev->of_node);
+ if (match && match->data)
+ return ((struct fsl_spi_match_data *)match->data)->type;
+ }
+ return TYPE_FSL;
+}
+
+static void fsl_spi_change_mode(struct spi_device *spi)
+{
+ struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master);
+ struct spi_mpc8xxx_cs *cs = spi->controller_state;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+ __be32 __iomem *mode = &reg_base->mode;
+ unsigned long flags;
+
+ if (cs->hw_mode == mpc8xxx_spi_read_reg(mode))
+ return;
+
+	/* Turn off IRQs locally to minimize the time that SPI is disabled. */
+ local_irq_save(flags);
+
+	/* Turn off the SPI unit prior to changing the mode */
+ mpc8xxx_spi_write_reg(mode, cs->hw_mode & ~SPMODE_ENABLE);
+
+ /* When in CPM mode, we need to reinit tx and rx. */
+	if (mspi->flags & SPI_CPM_MODE)
+		fsl_spi_cpm_reinit_txrx(mspi);
+ mpc8xxx_spi_write_reg(mode, cs->hw_mode);
+ local_irq_restore(flags);
+}
+
+static void fsl_spi_chipselect(struct spi_device *spi, int value)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct fsl_spi_platform_data *pdata;
+ bool pol = spi->mode & SPI_CS_HIGH;
+ struct spi_mpc8xxx_cs *cs = spi->controller_state;
+
+ pdata = spi->dev.parent->parent->platform_data;
+
+ if (value == BITBANG_CS_INACTIVE) {
+ if (pdata->cs_control)
+ pdata->cs_control(spi, !pol);
+ }
+
+ if (value == BITBANG_CS_ACTIVE) {
+ mpc8xxx_spi->rx_shift = cs->rx_shift;
+ mpc8xxx_spi->tx_shift = cs->tx_shift;
+ mpc8xxx_spi->get_rx = cs->get_rx;
+ mpc8xxx_spi->get_tx = cs->get_tx;
+
+ fsl_spi_change_mode(spi);
+
+ if (pdata->cs_control)
+ pdata->cs_control(spi, pol);
+ }
+}
+
+static void fsl_spi_qe_cpu_set_shifts(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first)
+{
+ *rx_shift = 0;
+ *tx_shift = 0;
+ if (msb_first) {
+ if (bits_per_word <= 8) {
+ *rx_shift = 16;
+ *tx_shift = 24;
+ } else if (bits_per_word <= 16) {
+ *rx_shift = 16;
+ *tx_shift = 16;
+ }
+ } else {
+ if (bits_per_word <= 8)
+ *rx_shift = 8;
+ }
+}
+
+static void fsl_spi_grlib_set_shifts(u32 *rx_shift, u32 *tx_shift,
+ int bits_per_word, int msb_first)
+{
+ *rx_shift = 0;
+ *tx_shift = 0;
+ if (bits_per_word <= 16) {
+ if (msb_first) {
+ *rx_shift = 16; /* LSB in bit 16 */
+ *tx_shift = 32 - bits_per_word; /* MSB in bit 31 */
+ } else {
+ *rx_shift = 16 - bits_per_word; /* MSB in bit 15 */
+ }
+ }
+}
+
+static int mspi_apply_cpu_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ struct spi_device *spi,
+ struct mpc8xxx_spi *mpc8xxx_spi,
+ int bits_per_word)
+{
+ cs->rx_shift = 0;
+ cs->tx_shift = 0;
+ if (bits_per_word <= 8) {
+ cs->get_rx = mpc8xxx_spi_rx_buf_u8;
+ cs->get_tx = mpc8xxx_spi_tx_buf_u8;
+ } else if (bits_per_word <= 16) {
+ cs->get_rx = mpc8xxx_spi_rx_buf_u16;
+ cs->get_tx = mpc8xxx_spi_tx_buf_u16;
+ } else if (bits_per_word <= 32) {
+ cs->get_rx = mpc8xxx_spi_rx_buf_u32;
+ cs->get_tx = mpc8xxx_spi_tx_buf_u32;
+ } else
+ return -EINVAL;
+
+ if (mpc8xxx_spi->set_shifts)
+ mpc8xxx_spi->set_shifts(&cs->rx_shift, &cs->tx_shift,
+ bits_per_word,
+ !(spi->mode & SPI_LSB_FIRST));
+
+ mpc8xxx_spi->rx_shift = cs->rx_shift;
+ mpc8xxx_spi->tx_shift = cs->tx_shift;
+ mpc8xxx_spi->get_rx = cs->get_rx;
+ mpc8xxx_spi->get_tx = cs->get_tx;
+
+ return bits_per_word;
+}
+
+static int mspi_apply_qe_mode_quirks(struct spi_mpc8xxx_cs *cs,
+ struct spi_device *spi,
+ int bits_per_word)
+{
+	/*
+	 * QE uses little endian for words > 8 bits, so transform all
+	 * words > 8 into 8-bit chunks.  Unfortunately that doesn't work
+	 * for LSB-first, so reject those for now.
+	 * Note: 32-bit words with LSB first do work iff tfcr/rfcr is set
+	 * to CPMFCR_GBL.
+	 */
+ if (spi->mode & SPI_LSB_FIRST &&
+ bits_per_word > 8)
+ return -EINVAL;
+ if (bits_per_word > 8)
+		return 8; /* pretend it's 8 bits */
+ return bits_per_word;
+}
+
+static int fsl_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ int bits_per_word = 0;
+ u8 pm;
+ u32 hz = 0;
+ struct spi_mpc8xxx_cs *cs = spi->controller_state;
+
+ mpc8xxx_spi = spi_master_get_devdata(spi->master);
+
+ if (t) {
+ bits_per_word = t->bits_per_word;
+ hz = t->speed_hz;
+ }
+
+ /* spi_transfer level calls that work per-word */
+ if (!bits_per_word)
+ bits_per_word = spi->bits_per_word;
+
+ if (!hz)
+ hz = spi->max_speed_hz;
+
+ if (!(mpc8xxx_spi->flags & SPI_CPM_MODE))
+ bits_per_word = mspi_apply_cpu_mode_quirks(cs, spi,
+ mpc8xxx_spi,
+ bits_per_word);
+ else if (mpc8xxx_spi->flags & SPI_QE)
+ bits_per_word = mspi_apply_qe_mode_quirks(cs, spi,
+ bits_per_word);
+
+ if (bits_per_word < 0)
+ return bits_per_word;
+
+ if (bits_per_word == 32)
+ bits_per_word = 0;
+ else
+ bits_per_word = bits_per_word - 1;
+
+ /* mask out bits we are going to set */
+ cs->hw_mode &= ~(SPMODE_LEN(0xF) | SPMODE_DIV16
+ | SPMODE_PM(0xF));
+
+ cs->hw_mode |= SPMODE_LEN(bits_per_word);
+
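+	/*
+	 * The SPI clock is spibrg / (4 * (pm + 1)), or with DIV16 set,
+	 * spibrg / (16 * 4 * (pm + 1)).  Switch to DIV16 once the
+	 * requested rate needs an overall divider above 64, and round pm
+	 * up so the resulting clock never exceeds the requested rate.
+	 */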
+ if ((mpc8xxx_spi->spibrg / hz) > 64) {
+ cs->hw_mode |= SPMODE_DIV16;
+ pm = (mpc8xxx_spi->spibrg - 1) / (hz * 64) + 1;
+
+ WARN_ONCE(pm > 16, "%s: Requested speed is too low: %d Hz. "
+ "Will use %d Hz instead.\n", dev_name(&spi->dev),
+ hz, mpc8xxx_spi->spibrg / 1024);
+ if (pm > 16)
+ pm = 16;
+ } else {
+ pm = (mpc8xxx_spi->spibrg - 1) / (hz * 4) + 1;
+ }
+ if (pm)
+ pm--;
+
+ cs->hw_mode |= SPMODE_PM(pm);
+
+ fsl_spi_change_mode(spi);
+ return 0;
+}
+
+static int fsl_spi_cpu_bufs(struct mpc8xxx_spi *mspi,
+ struct spi_transfer *t, unsigned int len)
+{
+ u32 word;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+ mspi->count = len;
+
+ /* enable rx ints */
+ mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE);
+
+ /* transmit word */
+ word = mspi->get_tx(mspi);
+ mpc8xxx_spi_write_reg(&reg_base->transmit, word);
+
+ return 0;
+}
+
+static int fsl_spi_bufs(struct spi_device *spi, struct spi_transfer *t,
+ bool is_dma_mapped)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct fsl_spi_reg *reg_base;
+ unsigned int len = t->len;
+ u8 bits_per_word;
+ int ret;
+
+ reg_base = mpc8xxx_spi->reg_base;
+ bits_per_word = spi->bits_per_word;
+ if (t->bits_per_word)
+ bits_per_word = t->bits_per_word;
+
+ if (bits_per_word > 8) {
+ /* invalid length? */
+ if (len & 1)
+ return -EINVAL;
+ len /= 2;
+ }
+ if (bits_per_word > 16) {
+ /* invalid length? */
+ if (len & 1)
+ return -EINVAL;
+ len /= 2;
+ }
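+	/* len is now a word count: halved once for 9..16 bpw, twice for 17..32 bpw */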
+
+ mpc8xxx_spi->tx = t->tx_buf;
+ mpc8xxx_spi->rx = t->rx_buf;
+
+ reinit_completion(&mpc8xxx_spi->done);
+
+ if (mpc8xxx_spi->flags & SPI_CPM_MODE)
+ ret = fsl_spi_cpm_bufs(mpc8xxx_spi, t, is_dma_mapped);
+ else
+ ret = fsl_spi_cpu_bufs(mpc8xxx_spi, t, len);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&mpc8xxx_spi->done);
+
+ /* disable rx ints */
+ mpc8xxx_spi_write_reg(&reg_base->mask, 0);
+
+ if (mpc8xxx_spi->flags & SPI_CPM_MODE)
+ fsl_spi_cpm_bufs_complete(mpc8xxx_spi);
+
+ return mpc8xxx_spi->count;
+}
+
+static int fsl_spi_do_one_msg(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t, *first;
+ unsigned int cs_change;
+ const int nsecs = 50;
+ int status;
+
+ /* Don't allow changes if CS is active */
+ first = list_first_entry(&m->transfers, struct spi_transfer,
+ transfer_list);
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if ((first->bits_per_word != t->bits_per_word) ||
+ (first->speed_hz != t->speed_hz)) {
+ dev_err(&spi->dev,
+				"bits_per_word/speed_hz should be the same for all transfers in a message\n");
+ return -EINVAL;
+ }
+ }
+
+ cs_change = 1;
+ status = -EINVAL;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
+ if (cs_change)
+ status = fsl_spi_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ }
+
+ if (cs_change) {
+ fsl_spi_chipselect(spi, BITBANG_CS_ACTIVE);
+ ndelay(nsecs);
+ }
+ cs_change = t->cs_change;
+ if (t->len)
+ status = fsl_spi_bufs(spi, t, m->is_dma_mapped);
+ if (status) {
+ status = -EMSGSIZE;
+ break;
+ }
+ m->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change) {
+ ndelay(nsecs);
+ fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
+ ndelay(nsecs);
+ }
+ }
+
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ if (status || !cs_change) {
+ ndelay(nsecs);
+ fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
+ }
+
+ fsl_spi_setup_transfer(spi, NULL);
+ return 0;
+}
+
+static int fsl_spi_setup(struct spi_device *spi)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_spi_reg *reg_base;
+ int retval;
+ u32 hw_mode;
+ struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ if (!cs) {
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi_set_ctldata(spi, cs);
+ }
+ mpc8xxx_spi = spi_master_get_devdata(spi->master);
+
+ reg_base = mpc8xxx_spi->reg_base;
+
+ hw_mode = cs->hw_mode; /* Save original settings */
+ cs->hw_mode = mpc8xxx_spi_read_reg(&reg_base->mode);
+ /* mask out bits we are going to set */
+ cs->hw_mode &= ~(SPMODE_CP_BEGIN_EDGECLK | SPMODE_CI_INACTIVEHIGH
+ | SPMODE_REV | SPMODE_LOOP);
+
+ if (spi->mode & SPI_CPHA)
+ cs->hw_mode |= SPMODE_CP_BEGIN_EDGECLK;
+ if (spi->mode & SPI_CPOL)
+ cs->hw_mode |= SPMODE_CI_INACTIVEHIGH;
+ if (!(spi->mode & SPI_LSB_FIRST))
+ cs->hw_mode |= SPMODE_REV;
+ if (spi->mode & SPI_LOOP)
+ cs->hw_mode |= SPMODE_LOOP;
+
+ retval = fsl_spi_setup_transfer(spi, NULL);
+ if (retval < 0) {
+ cs->hw_mode = hw_mode; /* Restore settings */
+ return retval;
+ }
+
+ if (mpc8xxx_spi->type == TYPE_GRLIB) {
+ if (gpio_is_valid(spi->cs_gpio)) {
+ int desel;
+
+ retval = gpio_request(spi->cs_gpio,
+ dev_name(&spi->dev));
+ if (retval)
+ return retval;
+
+ desel = !(spi->mode & SPI_CS_HIGH);
+ retval = gpio_direction_output(spi->cs_gpio, desel);
+ if (retval) {
+ gpio_free(spi->cs_gpio);
+ return retval;
+ }
+ } else if (spi->cs_gpio != -ENOENT) {
+ if (spi->cs_gpio < 0)
+ return spi->cs_gpio;
+ return -EINVAL;
+ }
+ /* When spi->cs_gpio == -ENOENT, a hole in the phandle list
+ * indicates to use native chipselect if present, or allow for
+ * an always selected chip
+ */
+ }
+
+ /* Initialize chipselect - might be active for SPI_CS_HIGH mode */
+ fsl_spi_chipselect(spi, BITBANG_CS_INACTIVE);
+
+ return 0;
+}
+
+static void fsl_spi_cleanup(struct spi_device *spi)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct spi_mpc8xxx_cs *cs = spi_get_ctldata(spi);
+
+ if (mpc8xxx_spi->type == TYPE_GRLIB && gpio_is_valid(spi->cs_gpio))
+ gpio_free(spi->cs_gpio);
+
+ kfree(cs);
+ spi_set_ctldata(spi, NULL);
+}
+
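+/*
+ * PIO flow: fsl_spi_cpu_bufs() primes the transmit register and enables
+ * the "not empty" interrupt; each interrupt below then drains one
+ * received word, waits for transmit-ready, and either queues the next
+ * word or signals completion once mspi->count reaches zero.
+ */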
+static void fsl_spi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events)
+{
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+	/* We need to handle RX first */
+ if (events & SPIE_NE) {
+ u32 rx_data = mpc8xxx_spi_read_reg(&reg_base->receive);
+
+ if (mspi->rx)
+ mspi->get_rx(rx_data, mspi);
+ }
+
+ if ((events & SPIE_NF) == 0)
+ /* spin until TX is done */
+ while (((events =
+ mpc8xxx_spi_read_reg(&reg_base->event)) &
+ SPIE_NF) == 0)
+ cpu_relax();
+
+ /* Clear the events */
+ mpc8xxx_spi_write_reg(&reg_base->event, events);
+
+ mspi->count -= 1;
+ if (mspi->count) {
+ u32 word = mspi->get_tx(mspi);
+
+ mpc8xxx_spi_write_reg(&reg_base->transmit, word);
+ } else {
+ complete(&mspi->done);
+ }
+}
+
+static irqreturn_t fsl_spi_irq(s32 irq, void *context_data)
+{
+ struct mpc8xxx_spi *mspi = context_data;
+ irqreturn_t ret = IRQ_NONE;
+ u32 events;
+ struct fsl_spi_reg *reg_base = mspi->reg_base;
+
+	/* Get interrupt events (tx/rx) */
+ events = mpc8xxx_spi_read_reg(&reg_base->event);
+ if (events)
+ ret = IRQ_HANDLED;
+
+ dev_dbg(mspi->dev, "%s: events %x\n", __func__, events);
+
+ if (mspi->flags & SPI_CPM_MODE)
+ fsl_spi_cpm_irq(mspi, events);
+ else
+ fsl_spi_cpu_irq(mspi, events);
+
+ return ret;
+}
+
+static void fsl_spi_remove(struct mpc8xxx_spi *mspi)
+{
+ iounmap(mspi->reg_base);
+ fsl_spi_cpm_free(mspi);
+}
+
+static void fsl_spi_grlib_cs_control(struct spi_device *spi, bool on)
+{
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
+ struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base;
+ u32 slvsel;
+ u16 cs = spi->chip_select;
+
+ if (gpio_is_valid(spi->cs_gpio)) {
+ gpio_set_value(spi->cs_gpio, on);
+ } else if (cs < mpc8xxx_spi->native_chipselects) {
+ slvsel = mpc8xxx_spi_read_reg(&reg_base->slvsel);
+ slvsel = on ? (slvsel | (1 << cs)) : (slvsel & ~(1 << cs));
+ mpc8xxx_spi_write_reg(&reg_base->slvsel, slvsel);
+ }
+}
+
+static void fsl_spi_grlib_probe(struct device *dev)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ struct fsl_spi_reg *reg_base = mpc8xxx_spi->reg_base;
+ int mbits;
+ u32 capabilities;
+
+ capabilities = mpc8xxx_spi_read_reg(&reg_base->cap);
+
+ mpc8xxx_spi->set_shifts = fsl_spi_grlib_set_shifts;
+ mbits = SPCAP_MAXWLEN(capabilities);
+ if (mbits)
+ mpc8xxx_spi->max_bits_per_word = mbits + 1;
+
+ mpc8xxx_spi->native_chipselects = 0;
+ if (SPCAP_SSEN(capabilities)) {
+ mpc8xxx_spi->native_chipselects = SPCAP_SSSZ(capabilities);
+ mpc8xxx_spi_write_reg(&reg_base->slvsel, 0xffffffff);
+ }
+ master->num_chipselect = mpc8xxx_spi->native_chipselects;
+ pdata->cs_control = fsl_spi_grlib_cs_control;
+}
+
+static struct spi_master *fsl_spi_probe(struct device *dev,
+ struct resource *mem, unsigned int irq)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct spi_master *master;
+ struct mpc8xxx_spi *mpc8xxx_spi;
+ struct fsl_spi_reg *reg_base;
+ u32 regval;
+ int ret = 0;
+
+ master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi));
+ if (master == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ dev_set_drvdata(dev, master);
+
+ mpc8xxx_spi_probe(dev, mem, irq);
+
+ master->setup = fsl_spi_setup;
+ master->cleanup = fsl_spi_cleanup;
+ master->transfer_one_message = fsl_spi_do_one_msg;
+
+ mpc8xxx_spi = spi_master_get_devdata(master);
+ mpc8xxx_spi->spi_remove = fsl_spi_remove;
+ mpc8xxx_spi->max_bits_per_word = 32;
+ mpc8xxx_spi->type = fsl_spi_get_type(dev);
+
+ ret = fsl_spi_cpm_init(mpc8xxx_spi);
+ if (ret)
+ goto err_cpm_init;
+
+ mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem));
+ if (mpc8xxx_spi->reg_base == NULL) {
+ ret = -ENOMEM;
+ goto err_ioremap;
+ }
+
+ if (mpc8xxx_spi->type == TYPE_GRLIB)
+ fsl_spi_grlib_probe(dev);
+
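+	/* 4..16 bit and 32 bit words, capped by the controller's maximum */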
+ master->bits_per_word_mask =
+ (SPI_BPW_RANGE_MASK(4, 16) | SPI_BPW_MASK(32)) &
+ SPI_BPW_RANGE_MASK(1, mpc8xxx_spi->max_bits_per_word);
+
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
+ mpc8xxx_spi->set_shifts = fsl_spi_qe_cpu_set_shifts;
+
+ if (mpc8xxx_spi->set_shifts)
+ /* 8 bits per word and MSB first */
+ mpc8xxx_spi->set_shifts(&mpc8xxx_spi->rx_shift,
+ &mpc8xxx_spi->tx_shift, 8, 1);
+
+ /* Register for SPI Interrupt */
+ ret = request_irq(mpc8xxx_spi->irq, fsl_spi_irq,
+ 0, "fsl_spi", mpc8xxx_spi);
+
+ if (ret != 0)
+ goto free_irq;
+
+ reg_base = mpc8xxx_spi->reg_base;
+
+ /* SPI controller initializations */
+ mpc8xxx_spi_write_reg(&reg_base->mode, 0);
+ mpc8xxx_spi_write_reg(&reg_base->mask, 0);
+ mpc8xxx_spi_write_reg(&reg_base->command, 0);
+ mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff);
+
+ /* Enable SPI interface */
+ regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
+ if (mpc8xxx_spi->max_bits_per_word < 8) {
+ regval &= ~SPMODE_LEN(0xF);
+ regval |= SPMODE_LEN(mpc8xxx_spi->max_bits_per_word - 1);
+ }
+ if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE)
+ regval |= SPMODE_OP;
+
+ mpc8xxx_spi_write_reg(&reg_base->mode, regval);
+
+ ret = spi_register_master(master);
+ if (ret < 0)
+ goto unreg_master;
+
+ dev_info(dev, "at 0x%p (irq = %d), %s mode\n", reg_base,
+ mpc8xxx_spi->irq, mpc8xxx_spi_strmode(mpc8xxx_spi->flags));
+
+ return master;
+
+unreg_master:
+ free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
+free_irq:
+ iounmap(mpc8xxx_spi->reg_base);
+err_ioremap:
+ fsl_spi_cpm_free(mpc8xxx_spi);
+err_cpm_init:
+ spi_master_put(master);
+err:
+ return ERR_PTR(ret);
+}
+
+static void fsl_spi_cs_control(struct spi_device *spi, bool on)
+{
+ struct device *dev = spi->dev.parent->parent;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+ u16 cs = spi->chip_select;
+ int gpio = pinfo->gpios[cs];
+ bool alow = pinfo->alow_flags[cs];
+
+ gpio_set_value(gpio, on ^ alow);
+}
+
+static int of_fsl_spi_get_chipselects(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+ int ngpios;
+ int i = 0;
+ int ret;
+
+ ngpios = of_gpio_count(np);
+ if (ngpios <= 0) {
+ /*
+ * SPI w/o chip-select line. One SPI device is still permitted
+ * though.
+ */
+ pdata->max_chipselect = 1;
+ return 0;
+ }
+
+ pinfo->gpios = kmalloc(ngpios * sizeof(*pinfo->gpios), GFP_KERNEL);
+ if (!pinfo->gpios)
+ return -ENOMEM;
+ memset(pinfo->gpios, -1, ngpios * sizeof(*pinfo->gpios));
+
+ pinfo->alow_flags = kzalloc(ngpios * sizeof(*pinfo->alow_flags),
+ GFP_KERNEL);
+ if (!pinfo->alow_flags) {
+ ret = -ENOMEM;
+ goto err_alloc_flags;
+ }
+
+ for (; i < ngpios; i++) {
+ int gpio;
+ enum of_gpio_flags flags;
+
+ gpio = of_get_gpio_flags(np, i, &flags);
+ if (!gpio_is_valid(gpio)) {
+ dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
+ ret = gpio;
+ goto err_loop;
+ }
+
+ ret = gpio_request(gpio, dev_name(dev));
+ if (ret) {
+ dev_err(dev, "can't request gpio #%d: %d\n", i, ret);
+ goto err_loop;
+ }
+
+ pinfo->gpios[i] = gpio;
+ pinfo->alow_flags[i] = flags & OF_GPIO_ACTIVE_LOW;
+
+ ret = gpio_direction_output(pinfo->gpios[i],
+ pinfo->alow_flags[i]);
+ if (ret) {
+			dev_err(dev,
+				"can't set output direction for gpio #%d: %d\n",
+				i, ret);
+ goto err_loop;
+ }
+ }
+
+ pdata->max_chipselect = ngpios;
+ pdata->cs_control = fsl_spi_cs_control;
+
+ return 0;
+
+err_loop:
+ while (i >= 0) {
+ if (gpio_is_valid(pinfo->gpios[i]))
+ gpio_free(pinfo->gpios[i]);
+ i--;
+ }
+
+ kfree(pinfo->alow_flags);
+ pinfo->alow_flags = NULL;
+err_alloc_flags:
+ kfree(pinfo->gpios);
+ pinfo->gpios = NULL;
+ return ret;
+}
+
+static int of_fsl_spi_free_chipselects(struct device *dev)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc8xxx_spi_probe_info *pinfo = to_of_pinfo(pdata);
+ int i;
+
+ if (!pinfo->gpios)
+ return 0;
+
+ for (i = 0; i < pdata->max_chipselect; i++) {
+ if (gpio_is_valid(pinfo->gpios[i]))
+ gpio_free(pinfo->gpios[i]);
+ }
+
+ kfree(pinfo->gpios);
+ kfree(pinfo->alow_flags);
+ return 0;
+}
+
+static int of_fsl_spi_probe(struct platform_device *ofdev)
+{
+ struct device *dev = &ofdev->dev;
+ struct device_node *np = ofdev->dev.of_node;
+ struct spi_master *master;
+ struct resource mem;
+ int irq, type;
+ int ret = -ENOMEM;
+
+ ret = of_mpc8xxx_spi_probe(ofdev);
+ if (ret)
+ return ret;
+
+ type = fsl_spi_get_type(&ofdev->dev);
+ if (type == TYPE_FSL) {
+ ret = of_fsl_spi_get_chipselects(dev);
+ if (ret)
+ goto err;
+ }
+
+ ret = of_address_to_resource(np, 0, &mem);
+ if (ret)
+ goto err;
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ master = fsl_spi_probe(dev, &mem, irq);
+ if (IS_ERR(master)) {
+ ret = PTR_ERR(master);
+ goto err;
+ }
+
+ return 0;
+
+err:
+ if (type == TYPE_FSL)
+ of_fsl_spi_free_chipselects(dev);
+ return ret;
+}
+
+static int of_fsl_spi_remove(struct platform_device *ofdev)
+{
+ struct spi_master *master = platform_get_drvdata(ofdev);
+ struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = mpc8xxx_spi_remove(&ofdev->dev);
+ if (ret)
+ return ret;
+ if (mpc8xxx_spi->type == TYPE_FSL)
+ of_fsl_spi_free_chipselects(&ofdev->dev);
+ return 0;
+}
+
+static struct platform_driver of_fsl_spi_driver = {
+ .driver = {
+ .name = "fsl_spi",
+ .of_match_table = of_fsl_spi_match,
+ },
+ .probe = of_fsl_spi_probe,
+ .remove = of_fsl_spi_remove,
+};
+
+#ifdef CONFIG_MPC832x_RDB
+/*
+ * XXX XXX XXX
+ * This is a "legacy" platform driver; it was used only by the MPC8323E-RDB
+ * boards. The driver should go away soon, since newer MPC8323E-RDB device
+ * trees can work with the OpenFirmware driver. But for now we support the
+ * old trees as well.
+ */
+static int plat_mpc8xxx_spi_probe(struct platform_device *pdev)
+{
+ struct resource *mem;
+ int irq;
+ struct spi_master *master;
+
+ if (!dev_get_platdata(&pdev->dev))
+ return -EINVAL;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem)
+ return -EINVAL;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq <= 0)
+ return -EINVAL;
+
+ master = fsl_spi_probe(&pdev->dev, mem, irq);
+ return PTR_ERR_OR_ZERO(master);
+}
+
+static int plat_mpc8xxx_spi_remove(struct platform_device *pdev)
+{
+ return mpc8xxx_spi_remove(&pdev->dev);
+}
+
+MODULE_ALIAS("platform:mpc8xxx_spi");
+static struct platform_driver mpc8xxx_spi_driver = {
+ .probe = plat_mpc8xxx_spi_probe,
+ .remove = plat_mpc8xxx_spi_remove,
+ .driver = {
+ .name = "mpc8xxx_spi",
+ },
+};
+
+static bool legacy_driver_failed;
+
+static void __init legacy_driver_register(void)
+{
+ legacy_driver_failed = platform_driver_register(&mpc8xxx_spi_driver);
+}
+
+static void __exit legacy_driver_unregister(void)
+{
+ if (legacy_driver_failed)
+ return;
+ platform_driver_unregister(&mpc8xxx_spi_driver);
+}
+#else
+static void __init legacy_driver_register(void) {}
+static void __exit legacy_driver_unregister(void) {}
+#endif /* CONFIG_MPC832x_RDB */
+
+static int __init fsl_spi_init(void)
+{
+ legacy_driver_register();
+ return platform_driver_register(&of_fsl_spi_driver);
+}
+module_init(fsl_spi_init);
+
+static void __exit fsl_spi_exit(void)
+{
+ platform_driver_unregister(&of_fsl_spi_driver);
+ legacy_driver_unregister();
+}
+module_exit(fsl_spi_exit);
+
+MODULE_AUTHOR("Kumar Gala");
+MODULE_DESCRIPTION("Simple Freescale SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-fsl-spi.h b/drivers/spi/spi-fsl-spi.h
new file mode 100644
index 000000000..9a6dae00e
--- /dev/null
+++ b/drivers/spi/spi-fsl-spi.h
@@ -0,0 +1,72 @@
+/*
+ * Freescale SPI controller driver.
+ *
+ * Maintainer: Kumar Gala
+ *
+ * Copyright (C) 2006 Polycom, Inc.
+ * Copyright 2010 Freescale Semiconductor, Inc.
+ *
+ * CPM SPI and QE buffer descriptors mode support:
+ * Copyright (c) 2009 MontaVista Software, Inc.
+ * Author: Anton Vorontsov <avorontsov@ru.mvista.com>
+ *
+ * GRLIB support:
+ * Copyright (c) 2012 Aeroflex Gaisler AB.
+ * Author: Andreas Larsson <andreas@gaisler.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __SPI_FSL_SPI_H__
+#define __SPI_FSL_SPI_H__
+
+/* SPI Controller registers */
+struct fsl_spi_reg {
+ __be32 cap; /* TYPE_GRLIB specific */
+ u8 res1[0x1C];
+ __be32 mode;
+ __be32 event;
+ __be32 mask;
+ __be32 command;
+ __be32 transmit;
+ __be32 receive;
+ __be32 slvsel; /* TYPE_GRLIB specific */
+};
+
+/* SPI Controller mode register definitions */
+#define SPMODE_LOOP (1 << 30)
+#define SPMODE_CI_INACTIVEHIGH (1 << 29)
+#define SPMODE_CP_BEGIN_EDGECLK (1 << 28)
+#define SPMODE_DIV16 (1 << 27)
+#define SPMODE_REV (1 << 26)
+#define SPMODE_MS (1 << 25)
+#define SPMODE_ENABLE (1 << 24)
+#define SPMODE_LEN(x) ((x) << 20)
+#define SPMODE_PM(x) ((x) << 16)
+#define SPMODE_OP (1 << 14)
+#define SPMODE_CG(x) ((x) << 7)
+
+/* TYPE_GRLIB SPI Controller capability register definitions */
+#define SPCAP_SSEN(x) (((x) >> 16) & 0x1)
+#define SPCAP_SSSZ(x) (((x) >> 24) & 0xff)
+#define SPCAP_MAXWLEN(x) (((x) >> 20) & 0xf)
+
+/*
+ * Default for SPI Mode:
+ * SPI MODE 0 (inactive low, phase middle, MSB, 8-bit length, slow clk)
+ */
+#define SPMODE_INIT_VAL (SPMODE_CI_INACTIVEHIGH | SPMODE_DIV16 | SPMODE_REV | \
+ SPMODE_MS | SPMODE_LEN(7) | SPMODE_PM(0xf))
+
+/* SPIE register values */
+#define SPIE_NE 0x00000200 /* Not empty */
+#define SPIE_NF 0x00000100 /* Not full */
+
+/* SPIM register values */
+#define SPIM_NE 0x00000200 /* Not empty */
+#define SPIM_NF 0x00000100 /* Not full */
+
+#endif /* __SPI_FSL_SPI_H__ */
diff --git a/drivers/spi/spi-gpio.c b/drivers/spi/spi-gpio.c
new file mode 100644
index 000000000..1c34c9314
--- /dev/null
+++ b/drivers/spi/spi-gpio.c
@@ -0,0 +1,547 @@
+/*
+ * SPI master driver using generic bitbanged GPIO
+ *
+ * Copyright (C) 2006,2008 David Brownell
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/spi_gpio.h>
+
+
+/*
+ * This bitbanging SPI master driver should help make systems usable
+ * when a native hardware SPI engine is not available, perhaps because
+ * its driver isn't yet working or because the I/O pins it requires
+ * are used for other purposes.
+ *
+ * platform_device->driver_data ... points to spi_gpio
+ *
+ * spi->controller_state ... reserved for bitbang framework code
+ * spi->controller_data ... holds chipselect GPIO
+ *
+ * spi->master->dev.driver_data ... points to spi_gpio->bitbang
+ */
+
+struct spi_gpio {
+ struct spi_bitbang bitbang;
+ struct spi_gpio_platform_data pdata;
+ struct platform_device *pdev;
+ unsigned long cs_gpios[0];
+};
+
+/*----------------------------------------------------------------------*/
+
+/*
+ * Because the overhead of going through four GPIO procedure calls
+ * per transferred bit can make performance a problem, this code
+ * is set up so that you can use it in either of two ways:
+ *
+ * - The slow generic way: set up platform_data to hold the GPIO
+ * numbers used for MISO/MOSI/SCK, and issue procedure calls for
+ * each of them. This driver can handle several such busses.
+ *
+ * - The quicker inlined way: only helps with platform GPIO code
+ * that inlines operations for constant GPIOs. This can give
+ * you tight (fast!) inner loops, but each such bus needs a
+ * new driver. You'll define a new C file, with Makefile and
+ * Kconfig support; the C code can be a total of six lines:
+ *
+ * #define DRIVER_NAME "myboard_spi2"
+ * #define SPI_MISO_GPIO 119
+ * #define SPI_MOSI_GPIO 120
+ * #define SPI_SCK_GPIO 121
+ * #define SPI_N_CHIPSEL 4
+ * #include "spi-gpio.c"
+ */
+
+#ifndef DRIVER_NAME
+#define DRIVER_NAME "spi_gpio"
+
+#define GENERIC_BITBANG /* vs tight inlines */
+
+/* all functions referencing these symbols must define pdata */
+#define SPI_MISO_GPIO ((pdata)->miso)
+#define SPI_MOSI_GPIO ((pdata)->mosi)
+#define SPI_SCK_GPIO ((pdata)->sck)
+
+#define SPI_N_CHIPSEL ((pdata)->num_chipselect)
+
+#endif
+
+/*----------------------------------------------------------------------*/
+
+static inline struct spi_gpio *__pure
+spi_to_spi_gpio(const struct spi_device *spi)
+{
+ const struct spi_bitbang *bang;
+ struct spi_gpio *spi_gpio;
+
+ bang = spi_master_get_devdata(spi->master);
+ spi_gpio = container_of(bang, struct spi_gpio, bitbang);
+ return spi_gpio;
+}
+
+static inline struct spi_gpio_platform_data *__pure
+spi_to_pdata(const struct spi_device *spi)
+{
+ return &spi_to_spi_gpio(spi)->pdata;
+}
+
+/* this is #defined to avoid unused-variable warnings when inlining */
+#define pdata spi_to_pdata(spi)
+
+static inline void setsck(const struct spi_device *spi, int is_on)
+{
+ gpio_set_value_cansleep(SPI_SCK_GPIO, is_on);
+}
+
+static inline void setmosi(const struct spi_device *spi, int is_on)
+{
+ gpio_set_value_cansleep(SPI_MOSI_GPIO, is_on);
+}
+
+static inline int getmiso(const struct spi_device *spi)
+{
+ return !!gpio_get_value_cansleep(SPI_MISO_GPIO);
+}
+
+#undef pdata
+
+/*
+ * NOTE: this clocks "as fast as we can". It "should" be a function of the
+ * requested device clock. Software overhead means we usually have trouble
+ * reaching even one Mbit/sec (except when we can inline bitops), so for now
+ * we'll just assume we never need additional per-bit slowdowns.
+ */
+#define spidelay(nsecs) do {} while (0)
+
+#include "spi-bitbang-txrx.h"
+
+/*
+ * These functions can leverage inline expansion of GPIO calls to shrink
+ * costs for a txrx bit, often by factors of around ten (by instruction
+ * count). That is particularly visible for larger word sizes, but helps
+ * even with default 8-bit words.
+ *
+ * REVISIT overheads calling these functions for each word also have
+ * significant performance costs. Having txrx_bufs() calls that inline
+ * the txrx_word() logic would help performance, e.g. on larger blocks
+ * used with flash storage or MMC/SD. There should also be ways to make
+ * GCC be less stupid about reloading registers inside the I/O loops,
+ * even without inlined GPIO calls; __attribute__((hot)) on GCC 4.3?
+ */
+
+static u32 spi_gpio_txrx_word_mode0(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+}
+
+static u32 spi_gpio_txrx_word_mode1(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
+}
+
+static u32 spi_gpio_txrx_word_mode2(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
+}
+
+static u32 spi_gpio_txrx_word_mode3(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
+}
+
+/*
+ * These functions do not call setmosi or getmiso if the respective flag
+ * (SPI_MASTER_NO_RX or SPI_MASTER_NO_TX) is set, so they are safe to
+ * call when such a pin is not present or defined in the controller.
+ * A separate set of callbacks is defined to get the highest possible
+ * speed in the generic case (when both MISO and MOSI lines are
+ * available), as the optimiser will remove the checks when the argument
+ * is constant.
+ */
+
+static u32 spi_gpio_spec_txrx_word_mode0(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ unsigned flags = spi->master->flags;
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, flags, word, bits);
+}
+
+static u32 spi_gpio_spec_txrx_word_mode1(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ unsigned flags = spi->master->flags;
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, flags, word, bits);
+}
+
+static u32 spi_gpio_spec_txrx_word_mode2(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ unsigned flags = spi->master->flags;
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, flags, word, bits);
+}
+
+static u32 spi_gpio_spec_txrx_word_mode3(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ unsigned flags = spi->master->flags;
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, flags, word, bits);
+}
+
+/*----------------------------------------------------------------------*/
+
+static void spi_gpio_chipselect(struct spi_device *spi, int is_active)
+{
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+ unsigned long cs = spi_gpio->cs_gpios[spi->chip_select];
+
+ /* set initial clock polarity */
+ if (is_active)
+ setsck(spi, spi->mode & SPI_CPOL);
+
+ if (cs != SPI_GPIO_NO_CHIPSELECT) {
+ /* SPI is normally active-low */
+ gpio_set_value_cansleep(cs, (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
+ }
+}
+
+static int spi_gpio_setup(struct spi_device *spi)
+{
+ unsigned long cs;
+ int status = 0;
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+ struct device_node *np = spi->master->dev.of_node;
+
+ if (np) {
+ /*
+ * In DT environments, the CS GPIOs have already been
+ * initialized from the "cs-gpios" property of the node.
+ */
+ cs = spi_gpio->cs_gpios[spi->chip_select];
+ } else {
+ /*
+ * ... otherwise, take it from spi->controller_data
+ */
+ cs = (uintptr_t) spi->controller_data;
+ }
+
+ if (!spi->controller_state) {
+ if (cs != SPI_GPIO_NO_CHIPSELECT) {
+ status = gpio_request(cs, dev_name(&spi->dev));
+ if (status)
+ return status;
+ status = gpio_direction_output(cs,
+ !(spi->mode & SPI_CS_HIGH));
+ }
+ }
+ if (!status) {
+ /* in case it was initialized from static board data */
+ spi_gpio->cs_gpios[spi->chip_select] = cs;
+ status = spi_bitbang_setup(spi);
+ }
+
+ if (status) {
+ if (!spi->controller_state && cs != SPI_GPIO_NO_CHIPSELECT)
+ gpio_free(cs);
+ }
+ return status;
+}
+
+static void spi_gpio_cleanup(struct spi_device *spi)
+{
+ struct spi_gpio *spi_gpio = spi_to_spi_gpio(spi);
+ unsigned long cs = spi_gpio->cs_gpios[spi->chip_select];
+
+ if (cs != SPI_GPIO_NO_CHIPSELECT)
+ gpio_free(cs);
+ spi_bitbang_cleanup(spi);
+}
+
+static int spi_gpio_alloc(unsigned pin, const char *label, bool is_in)
+{
+ int value;
+
+ value = gpio_request(pin, label);
+ if (value == 0) {
+ if (is_in)
+ value = gpio_direction_input(pin);
+ else
+ value = gpio_direction_output(pin, 0);
+ }
+ return value;
+}
+
+static int spi_gpio_request(struct spi_gpio_platform_data *pdata,
+ const char *label, u16 *res_flags)
+{
+ int value;
+
+ /* NOTE: SPI_*_GPIO symbols may reference "pdata" */
+
+ if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI) {
+ value = spi_gpio_alloc(SPI_MOSI_GPIO, label, false);
+ if (value)
+ goto done;
+ } else {
+ /* HW configuration without MOSI pin */
+ *res_flags |= SPI_MASTER_NO_TX;
+ }
+
+ if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO) {
+ value = spi_gpio_alloc(SPI_MISO_GPIO, label, true);
+ if (value)
+ goto free_mosi;
+ } else {
+ /* HW configuration without MISO pin */
+ *res_flags |= SPI_MASTER_NO_RX;
+ }
+
+ value = spi_gpio_alloc(SPI_SCK_GPIO, label, false);
+ if (value)
+ goto free_miso;
+
+ goto done;
+
+free_miso:
+ if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
+ gpio_free(SPI_MISO_GPIO);
+free_mosi:
+ if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI)
+ gpio_free(SPI_MOSI_GPIO);
+done:
+ return value;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id spi_gpio_dt_ids[] = {
+ { .compatible = "spi-gpio" },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_gpio_dt_ids);
+
+static int spi_gpio_probe_dt(struct platform_device *pdev)
+{
+ int ret;
+ u32 tmp;
+ struct spi_gpio_platform_data *pdata;
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(spi_gpio_dt_ids, &pdev->dev);
+
+ if (!of_id)
+ return 0;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ ret = of_get_named_gpio(np, "gpio-sck", 0);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "gpio-sck property not found\n");
+ goto error_free;
+ }
+ pdata->sck = ret;
+
+ ret = of_get_named_gpio(np, "gpio-miso", 0);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "gpio-miso property not found, switching to no-rx mode\n");
+ pdata->miso = SPI_GPIO_NO_MISO;
+ } else
+ pdata->miso = ret;
+
+ ret = of_get_named_gpio(np, "gpio-mosi", 0);
+ if (ret < 0) {
+ dev_info(&pdev->dev, "gpio-mosi property not found, switching to no-tx mode\n");
+ pdata->mosi = SPI_GPIO_NO_MOSI;
+ } else
+ pdata->mosi = ret;
+
+ ret = of_property_read_u32(np, "num-chipselects", &tmp);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "num-chipselects property not found\n");
+ goto error_free;
+ }
+
+ pdata->num_chipselect = tmp;
+ pdev->dev.platform_data = pdata;
+
+ return 1;
+
+error_free:
+ devm_kfree(&pdev->dev, pdata);
+ return ret;
+}
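+
+/*
+ * An illustrative (not normative) device tree node matching the
+ * properties parsed above:
+ *
+ *	spi {
+ *		compatible = "spi-gpio";
+ *		gpio-sck = <&gpio 121 0>;
+ *		gpio-miso = <&gpio 119 0>;
+ *		gpio-mosi = <&gpio 120 0>;
+ *		num-chipselects = <1>;
+ *		cs-gpios = <&gpio 122 0>;
+ *	};
+ */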
+#else
+static inline int spi_gpio_probe_dt(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif
+
+static int spi_gpio_probe(struct platform_device *pdev)
+{
+ int status;
+ struct spi_master *master;
+ struct spi_gpio *spi_gpio;
+ struct spi_gpio_platform_data *pdata;
+ u16 master_flags = 0;
+ bool use_of = 0;
+ int num_devices;
+
+ status = spi_gpio_probe_dt(pdev);
+ if (status < 0)
+ return status;
+ if (status > 0)
+ use_of = 1;
+
+ pdata = dev_get_platdata(&pdev->dev);
+#ifdef GENERIC_BITBANG
+ if (!pdata || (!use_of && !pdata->num_chipselect))
+ return -ENODEV;
+#endif
+
+ if (use_of && !SPI_N_CHIPSEL)
+ num_devices = 1;
+ else
+ num_devices = SPI_N_CHIPSEL;
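+	/* num-chipselects = <0> in DT means one always-selected device */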
+
+ status = spi_gpio_request(pdata, dev_name(&pdev->dev), &master_flags);
+ if (status < 0)
+ return status;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi_gpio) +
+ (sizeof(unsigned long) * num_devices));
+ if (!master) {
+ status = -ENOMEM;
+ goto gpio_free;
+ }
+ spi_gpio = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, spi_gpio);
+
+ spi_gpio->pdev = pdev;
+ if (pdata)
+ spi_gpio->pdata = *pdata;
+
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ master->flags = master_flags;
+ master->bus_num = pdev->id;
+ master->num_chipselect = num_devices;
+ master->setup = spi_gpio_setup;
+ master->cleanup = spi_gpio_cleanup;
+#ifdef CONFIG_OF
+ master->dev.of_node = pdev->dev.of_node;
+
+ if (use_of) {
+ int i;
+ struct device_node *np = pdev->dev.of_node;
+
+ /*
+ * In DT environments, take the CS GPIO from the "cs-gpios"
+ * property of the node.
+ */
+
+ if (!SPI_N_CHIPSEL)
+ spi_gpio->cs_gpios[0] = SPI_GPIO_NO_CHIPSELECT;
+ else
+ for (i = 0; i < SPI_N_CHIPSEL; i++) {
+ status = of_get_named_gpio(np, "cs-gpios", i);
+ if (status < 0) {
+ dev_err(&pdev->dev,
+ "invalid cs-gpios property\n");
+ goto gpio_free;
+ }
+ spi_gpio->cs_gpios[i] = status;
+ }
+ }
+#endif
+
+ spi_gpio->bitbang.master = master;
+ spi_gpio->bitbang.chipselect = spi_gpio_chipselect;
+
+ if ((master_flags & (SPI_MASTER_NO_TX | SPI_MASTER_NO_RX)) == 0) {
+ spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_txrx_word_mode0;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_txrx_word_mode1;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_txrx_word_mode2;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_txrx_word_mode3;
+ } else {
+ spi_gpio->bitbang.txrx_word[SPI_MODE_0] = spi_gpio_spec_txrx_word_mode0;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_1] = spi_gpio_spec_txrx_word_mode1;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_2] = spi_gpio_spec_txrx_word_mode2;
+ spi_gpio->bitbang.txrx_word[SPI_MODE_3] = spi_gpio_spec_txrx_word_mode3;
+ }
+ spi_gpio->bitbang.setup_transfer = spi_bitbang_setup_transfer;
+ spi_gpio->bitbang.flags = SPI_CS_HIGH;
+
+ status = spi_bitbang_start(&spi_gpio->bitbang);
+ if (status < 0) {
+gpio_free:
+ if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
+ gpio_free(SPI_MISO_GPIO);
+ if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI)
+ gpio_free(SPI_MOSI_GPIO);
+ gpio_free(SPI_SCK_GPIO);
+ spi_master_put(master);
+ }
+
+ return status;
+}
+
+static int spi_gpio_remove(struct platform_device *pdev)
+{
+ struct spi_gpio *spi_gpio;
+ struct spi_gpio_platform_data *pdata;
+
+ spi_gpio = platform_get_drvdata(pdev);
+ pdata = dev_get_platdata(&pdev->dev);
+
+ /* stop() unregisters child devices too */
+ spi_bitbang_stop(&spi_gpio->bitbang);
+
+ if (SPI_MISO_GPIO != SPI_GPIO_NO_MISO)
+ gpio_free(SPI_MISO_GPIO);
+ if (SPI_MOSI_GPIO != SPI_GPIO_NO_MOSI)
+ gpio_free(SPI_MOSI_GPIO);
+ gpio_free(SPI_SCK_GPIO);
+ spi_master_put(spi_gpio->bitbang.master);
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+
+static struct platform_driver spi_gpio_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = of_match_ptr(spi_gpio_dt_ids),
+ },
+ .probe = spi_gpio_probe,
+ .remove = spi_gpio_remove,
+};
+module_platform_driver(spi_gpio_driver);
+
+MODULE_DESCRIPTION("SPI master driver using generic bitbanged GPIO");
+MODULE_AUTHOR("David Brownell");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
new file mode 100644
index 000000000..788e2b176
--- /dev/null
+++ b/drivers/spi/spi-img-spfi.c
@@ -0,0 +1,759 @@
+/*
+ * IMG SPFI controller driver
+ *
+ * Copyright (C) 2007,2008,2013 Imagination Technologies Ltd.
+ * Copyright (C) 2014 Google, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/scatterlist.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spinlock.h>
+
+#define SPFI_DEVICE_PARAMETER(x) (0x00 + 0x4 * (x))
+#define SPFI_DEVICE_PARAMETER_BITCLK_SHIFT 24
+#define SPFI_DEVICE_PARAMETER_BITCLK_MASK 0xff
+#define SPFI_DEVICE_PARAMETER_CSSETUP_SHIFT 16
+#define SPFI_DEVICE_PARAMETER_CSSETUP_MASK 0xff
+#define SPFI_DEVICE_PARAMETER_CSHOLD_SHIFT 8
+#define SPFI_DEVICE_PARAMETER_CSHOLD_MASK 0xff
+#define SPFI_DEVICE_PARAMETER_CSDELAY_SHIFT 0
+#define SPFI_DEVICE_PARAMETER_CSDELAY_MASK 0xff
+
+#define SPFI_CONTROL 0x14
+#define SPFI_CONTROL_CONTINUE BIT(12)
+#define SPFI_CONTROL_SOFT_RESET BIT(11)
+#define SPFI_CONTROL_SEND_DMA BIT(10)
+#define SPFI_CONTROL_GET_DMA BIT(9)
+#define SPFI_CONTROL_TMODE_SHIFT 5
+#define SPFI_CONTROL_TMODE_MASK 0x7
+#define SPFI_CONTROL_TMODE_SINGLE 0
+#define SPFI_CONTROL_TMODE_DUAL 1
+#define SPFI_CONTROL_TMODE_QUAD 2
+#define SPFI_CONTROL_SPFI_EN BIT(0)
+
+#define SPFI_TRANSACTION 0x18
+#define SPFI_TRANSACTION_TSIZE_SHIFT 16
+#define SPFI_TRANSACTION_TSIZE_MASK 0xffff
+
+#define SPFI_PORT_STATE 0x1c
+#define SPFI_PORT_STATE_DEV_SEL_SHIFT 20
+#define SPFI_PORT_STATE_DEV_SEL_MASK 0x7
+#define SPFI_PORT_STATE_CK_POL(x) BIT(19 - (x))
+#define SPFI_PORT_STATE_CK_PHASE(x) BIT(14 - (x))
+
+#define SPFI_TX_32BIT_VALID_DATA 0x20
+#define SPFI_TX_8BIT_VALID_DATA 0x24
+#define SPFI_RX_32BIT_VALID_DATA 0x28
+#define SPFI_RX_8BIT_VALID_DATA 0x2c
+
+#define SPFI_INTERRUPT_STATUS 0x30
+#define SPFI_INTERRUPT_ENABLE 0x34
+#define SPFI_INTERRUPT_CLEAR 0x38
+#define SPFI_INTERRUPT_IACCESS BIT(12)
+#define SPFI_INTERRUPT_GDEX8BIT BIT(11)
+#define SPFI_INTERRUPT_ALLDONETRIG BIT(9)
+#define SPFI_INTERRUPT_GDFUL BIT(8)
+#define SPFI_INTERRUPT_GDHF BIT(7)
+#define SPFI_INTERRUPT_GDEX32BIT BIT(6)
+#define SPFI_INTERRUPT_GDTRIG BIT(5)
+#define SPFI_INTERRUPT_SDFUL BIT(3)
+#define SPFI_INTERRUPT_SDHF BIT(2)
+#define SPFI_INTERRUPT_SDE BIT(1)
+#define SPFI_INTERRUPT_SDTRIG BIT(0)
+
+/*
+ * There are four parallel FIFOs of 16 bytes each. The word buffer
+ * (*_32BIT_VALID_DATA) accesses all four FIFOs at once, resulting in an
+ * effective FIFO size of 64 bytes. The byte buffer (*_8BIT_VALID_DATA)
+ * accesses only a single FIFO, resulting in an effective FIFO size of
+ * 16 bytes.
+ */
+#define SPFI_32BIT_FIFO_SIZE 64
+#define SPFI_8BIT_FIFO_SIZE 16
+
+struct img_spfi {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ void __iomem *regs;
+ phys_addr_t phys;
+ int irq;
+ struct clk *spfi_clk;
+ struct clk *sys_clk;
+
+ struct dma_chan *rx_ch;
+ struct dma_chan *tx_ch;
+ bool tx_dma_busy;
+ bool rx_dma_busy;
+};
+
+static inline u32 spfi_readl(struct img_spfi *spfi, u32 reg)
+{
+ return readl(spfi->regs + reg);
+}
+
+static inline void spfi_writel(struct img_spfi *spfi, u32 val, u32 reg)
+{
+ writel(val, spfi->regs + reg);
+}
+
+static inline void spfi_start(struct img_spfi *spfi)
+{
+ u32 val;
+
+ val = spfi_readl(spfi, SPFI_CONTROL);
+ val |= SPFI_CONTROL_SPFI_EN;
+ spfi_writel(spfi, val, SPFI_CONTROL);
+}
+
+static inline void spfi_reset(struct img_spfi *spfi)
+{
+ spfi_writel(spfi, SPFI_CONTROL_SOFT_RESET, SPFI_CONTROL);
+ spfi_writel(spfi, 0, SPFI_CONTROL);
+}
+
+static int spfi_wait_all_done(struct img_spfi *spfi)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(50);
+
+ while (time_before(jiffies, timeout)) {
+ u32 status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+
+ if (status & SPFI_INTERRUPT_ALLDONETRIG) {
+ spfi_writel(spfi, SPFI_INTERRUPT_ALLDONETRIG,
+ SPFI_INTERRUPT_CLEAR);
+ return 0;
+ }
+ cpu_relax();
+ }
+
+ dev_err(spfi->dev, "Timed out waiting for transaction to complete\n");
+ spfi_reset(spfi);
+
+ return -ETIMEDOUT;
+}
+
+static unsigned int spfi_pio_write32(struct img_spfi *spfi, const u32 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max / 4) {
+ spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (status & SPFI_INTERRUPT_SDFUL)
+ break;
+ spfi_writel(spfi, buf[count], SPFI_TX_32BIT_VALID_DATA);
+ count++;
+ }
+
+ return count * 4;
+}
+
+static unsigned int spfi_pio_write8(struct img_spfi *spfi, const u8 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max) {
+ spfi_writel(spfi, SPFI_INTERRUPT_SDFUL, SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (status & SPFI_INTERRUPT_SDFUL)
+ break;
+ spfi_writel(spfi, buf[count], SPFI_TX_8BIT_VALID_DATA);
+ count++;
+ }
+
+ return count;
+}
+
+static unsigned int spfi_pio_read32(struct img_spfi *spfi, u32 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max / 4) {
+ spfi_writel(spfi, SPFI_INTERRUPT_GDEX32BIT,
+ SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (!(status & SPFI_INTERRUPT_GDEX32BIT))
+ break;
+ buf[count] = spfi_readl(spfi, SPFI_RX_32BIT_VALID_DATA);
+ count++;
+ }
+
+ return count * 4;
+}
+
+static unsigned int spfi_pio_read8(struct img_spfi *spfi, u8 *buf,
+ unsigned int max)
+{
+ unsigned int count = 0;
+ u32 status;
+
+ while (count < max) {
+ spfi_writel(spfi, SPFI_INTERRUPT_GDEX8BIT,
+ SPFI_INTERRUPT_CLEAR);
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (!(status & SPFI_INTERRUPT_GDEX8BIT))
+ break;
+ buf[count] = spfi_readl(spfi, SPFI_RX_8BIT_VALID_DATA);
+ count++;
+ }
+
+ return count;
+}
+
+static int img_spfi_start_pio(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ unsigned int tx_bytes = 0, rx_bytes = 0;
+ const void *tx_buf = xfer->tx_buf;
+ void *rx_buf = xfer->rx_buf;
+ unsigned long timeout;
+ int ret;
+
+ if (tx_buf)
+ tx_bytes = xfer->len;
+ if (rx_buf)
+ rx_bytes = xfer->len;
+
+ spfi_start(spfi);
+
+ timeout = jiffies +
+ msecs_to_jiffies(xfer->len * 8 * 1000 / xfer->speed_hz + 100);
+ while ((tx_bytes > 0 || rx_bytes > 0) &&
+ time_before(jiffies, timeout)) {
+ unsigned int tx_count, rx_count;
+
+ if (tx_bytes >= 4)
+ tx_count = spfi_pio_write32(spfi, tx_buf, tx_bytes);
+ else
+ tx_count = spfi_pio_write8(spfi, tx_buf, tx_bytes);
+
+ if (rx_bytes >= 4)
+ rx_count = spfi_pio_read32(spfi, rx_buf, rx_bytes);
+ else
+ rx_count = spfi_pio_read8(spfi, rx_buf, rx_bytes);
+
+ tx_buf += tx_count;
+ rx_buf += rx_count;
+ tx_bytes -= tx_count;
+ rx_bytes -= rx_count;
+
+ cpu_relax();
+ }
+
+ ret = spfi_wait_all_done(spfi);
+ if (ret < 0)
+ return ret;
+
+ if (rx_bytes > 0 || tx_bytes > 0) {
+ dev_err(spfi->dev, "PIO transfer timed out\n");
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static void img_spfi_dma_rx_cb(void *data)
+{
+ struct img_spfi *spfi = data;
+ unsigned long flags;
+
+ spfi_wait_all_done(spfi);
+
+ spin_lock_irqsave(&spfi->lock, flags);
+ spfi->rx_dma_busy = false;
+ if (!spfi->tx_dma_busy)
+ spi_finalize_current_transfer(spfi->master);
+ spin_unlock_irqrestore(&spfi->lock, flags);
+}
+
+static void img_spfi_dma_tx_cb(void *data)
+{
+ struct img_spfi *spfi = data;
+ unsigned long flags;
+
+ spfi_wait_all_done(spfi);
+
+ spin_lock_irqsave(&spfi->lock, flags);
+ spfi->tx_dma_busy = false;
+ if (!spfi->rx_dma_busy)
+ spi_finalize_current_transfer(spfi->master);
+ spin_unlock_irqrestore(&spfi->lock, flags);
+}
+
+static int img_spfi_start_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ struct dma_async_tx_descriptor *rxdesc = NULL, *txdesc = NULL;
+ struct dma_slave_config rxconf, txconf;
+
+ spfi->rx_dma_busy = false;
+ spfi->tx_dma_busy = false;
+
+ if (xfer->rx_buf) {
+ rxconf.direction = DMA_DEV_TO_MEM;
+ if (xfer->len % 4 == 0) {
+ rxconf.src_addr = spfi->phys + SPFI_RX_32BIT_VALID_DATA;
+ rxconf.src_addr_width = 4;
+ rxconf.src_maxburst = 4;
+ } else {
+ rxconf.src_addr = spfi->phys + SPFI_RX_8BIT_VALID_DATA;
+ rxconf.src_addr_width = 1;
+ rxconf.src_maxburst = 4;
+ }
+ dmaengine_slave_config(spfi->rx_ch, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(spfi->rx_ch, xfer->rx_sg.sgl,
+ xfer->rx_sg.nents,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT);
+ if (!rxdesc)
+ goto stop_dma;
+
+ rxdesc->callback = img_spfi_dma_rx_cb;
+ rxdesc->callback_param = spfi;
+ }
+
+ if (xfer->tx_buf) {
+ txconf.direction = DMA_MEM_TO_DEV;
+ if (xfer->len % 4 == 0) {
+ txconf.dst_addr = spfi->phys + SPFI_TX_32BIT_VALID_DATA;
+ txconf.dst_addr_width = 4;
+ txconf.dst_maxburst = 4;
+ } else {
+ txconf.dst_addr = spfi->phys + SPFI_TX_8BIT_VALID_DATA;
+ txconf.dst_addr_width = 1;
+ txconf.dst_maxburst = 4;
+ }
+ dmaengine_slave_config(spfi->tx_ch, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(spfi->tx_ch, xfer->tx_sg.sgl,
+ xfer->tx_sg.nents,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT);
+ if (!txdesc)
+ goto stop_dma;
+
+ txdesc->callback = img_spfi_dma_tx_cb;
+ txdesc->callback_param = spfi;
+ }
+
+ if (xfer->rx_buf) {
+ spfi->rx_dma_busy = true;
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(spfi->rx_ch);
+ }
+
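+	/*
+	 * Issue RX before enabling the controller so that no incoming
+	 * data is missed; TX is kicked off only once the controller runs.
+	 */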
+ spfi_start(spfi);
+
+ if (xfer->tx_buf) {
+ spfi->tx_dma_busy = true;
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(spfi->tx_ch);
+ }
+
+ return 1;
+
+stop_dma:
+ dmaengine_terminate_all(spfi->rx_ch);
+ dmaengine_terminate_all(spfi->tx_ch);
+ return -EIO;
+}
+
+static void img_spfi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ unsigned long flags;
+
+ /*
+ * Stop all DMA and reset the controller if the previous transaction
+	 * timed out and never completed its DMA.
+ */
+ spin_lock_irqsave(&spfi->lock, flags);
+ if (spfi->tx_dma_busy || spfi->rx_dma_busy) {
+ spfi->tx_dma_busy = false;
+ spfi->rx_dma_busy = false;
+
+ dmaengine_terminate_all(spfi->tx_ch);
+ dmaengine_terminate_all(spfi->rx_ch);
+ }
+ spin_unlock_irqrestore(&spfi->lock, flags);
+}
+
+static int img_spfi_prepare(struct spi_master *master, struct spi_message *msg)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ u32 val;
+
+ val = spfi_readl(spfi, SPFI_PORT_STATE);
+ if (msg->spi->mode & SPI_CPHA)
+ val |= SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
+ else
+ val &= ~SPFI_PORT_STATE_CK_PHASE(msg->spi->chip_select);
+ if (msg->spi->mode & SPI_CPOL)
+ val |= SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
+ else
+ val &= ~SPFI_PORT_STATE_CK_POL(msg->spi->chip_select);
+ spfi_writel(spfi, val, SPFI_PORT_STATE);
+
+ return 0;
+}
+
+static int img_spfi_unprepare(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+
+ spfi_reset(spfi);
+
+ return 0;
+}
+
+static int img_spfi_setup(struct spi_device *spi)
+{
+ int ret;
+
+ ret = gpio_request_one(spi->cs_gpio, (spi->mode & SPI_CS_HIGH) ?
+ GPIOF_OUT_INIT_LOW : GPIOF_OUT_INIT_HIGH,
+ dev_name(&spi->dev));
+ if (ret)
+ dev_err(&spi->dev, "can't request chipselect gpio %d\n",
+ spi->cs_gpio);
+
+ return ret;
+}
+
+static void img_spfi_cleanup(struct spi_device *spi)
+{
+ gpio_free(spi->cs_gpio);
+}
+
+static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ u32 val, div;
+
+ /*
+ * output = spfi_clk * (BITCLK / 512), where BITCLK must be a
+ * power of 2 up to 128
+ */
+ div = DIV_ROUND_UP(clk_get_rate(spfi->spfi_clk), xfer->speed_hz);
+ div = clamp(512 / (1 << get_count_order(div)), 1, 128);
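+	/*
+	 * Illustrative example (assumed clock values): with spfi_clk at
+	 * 50 MHz and a 5 MHz request, div = DIV_ROUND_UP(50M, 5M) = 10,
+	 * so BITCLK = clamp(512 / 16, 1, 128) = 32 and the output rate
+	 * is 50 MHz * 32 / 512 ~= 3.1 MHz, the largest power-of-two
+	 * setting that does not exceed the request.
+	 */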
+
+ val = spfi_readl(spfi, SPFI_DEVICE_PARAMETER(spi->chip_select));
+ val &= ~(SPFI_DEVICE_PARAMETER_BITCLK_MASK <<
+ SPFI_DEVICE_PARAMETER_BITCLK_SHIFT);
+ val |= div << SPFI_DEVICE_PARAMETER_BITCLK_SHIFT;
+ spfi_writel(spfi, val, SPFI_DEVICE_PARAMETER(spi->chip_select));
+
+ spfi_writel(spfi, xfer->len << SPFI_TRANSACTION_TSIZE_SHIFT,
+ SPFI_TRANSACTION);
+
+ val = spfi_readl(spfi, SPFI_CONTROL);
+ val &= ~(SPFI_CONTROL_SEND_DMA | SPFI_CONTROL_GET_DMA);
+ if (xfer->tx_buf)
+ val |= SPFI_CONTROL_SEND_DMA;
+ if (xfer->rx_buf)
+ val |= SPFI_CONTROL_GET_DMA;
+ val &= ~(SPFI_CONTROL_TMODE_MASK << SPFI_CONTROL_TMODE_SHIFT);
+ if (xfer->tx_nbits == SPI_NBITS_DUAL &&
+ xfer->rx_nbits == SPI_NBITS_DUAL)
+ val |= SPFI_CONTROL_TMODE_DUAL << SPFI_CONTROL_TMODE_SHIFT;
+ else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
+ xfer->rx_nbits == SPI_NBITS_QUAD)
+ val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
+ spfi_writel(spfi, val, SPFI_CONTROL);
+}
+
+static int img_spfi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct img_spfi *spfi = spi_master_get_devdata(spi->master);
+ int ret;
+
+ if (xfer->len > SPFI_TRANSACTION_TSIZE_MASK) {
+ dev_err(spfi->dev,
+ "Transfer length (%d) is greater than the max supported (%d)",
+ xfer->len, SPFI_TRANSACTION_TSIZE_MASK);
+ return -EINVAL;
+ }
+
+ img_spfi_config(master, spi, xfer);
+ if (master->can_dma && master->can_dma(master, spi, xfer))
+ ret = img_spfi_start_dma(master, spi, xfer);
+ else
+ ret = img_spfi_start_pio(master, spi, xfer);
+
+ return ret;
+}
+
+static bool img_spfi_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ if (xfer->len > SPFI_32BIT_FIFO_SIZE)
+ return true;
+ return false;
+}
+
+static irqreturn_t img_spfi_irq(int irq, void *dev_id)
+{
+ struct img_spfi *spfi = (struct img_spfi *)dev_id;
+ u32 status;
+
+ status = spfi_readl(spfi, SPFI_INTERRUPT_STATUS);
+ if (status & SPFI_INTERRUPT_IACCESS) {
+ spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_CLEAR);
+ dev_err(spfi->dev, "Illegal access interrupt");
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int img_spfi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct img_spfi *spfi;
+ struct resource *res;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spfi));
+ if (!master)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, master);
+
+ spfi = spi_master_get_devdata(master);
+ spfi->dev = &pdev->dev;
+ spfi->master = master;
+ spin_lock_init(&spfi->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spfi->regs = devm_ioremap_resource(spfi->dev, res);
+ if (IS_ERR(spfi->regs)) {
+ ret = PTR_ERR(spfi->regs);
+ goto put_spi;
+ }
+ spfi->phys = res->start;
+
+ spfi->irq = platform_get_irq(pdev, 0);
+ if (spfi->irq < 0) {
+ ret = spfi->irq;
+ goto put_spi;
+ }
+ ret = devm_request_irq(spfi->dev, spfi->irq, img_spfi_irq,
+ IRQ_TYPE_LEVEL_HIGH, dev_name(spfi->dev), spfi);
+ if (ret)
+ goto put_spi;
+
+ spfi->sys_clk = devm_clk_get(spfi->dev, "sys");
+ if (IS_ERR(spfi->sys_clk)) {
+ ret = PTR_ERR(spfi->sys_clk);
+ goto put_spi;
+ }
+ spfi->spfi_clk = devm_clk_get(spfi->dev, "spfi");
+ if (IS_ERR(spfi->spfi_clk)) {
+ ret = PTR_ERR(spfi->spfi_clk);
+ goto put_spi;
+ }
+
+ ret = clk_prepare_enable(spfi->sys_clk);
+ if (ret)
+ goto put_spi;
+ ret = clk_prepare_enable(spfi->spfi_clk);
+ if (ret)
+ goto disable_pclk;
+
+ spfi_reset(spfi);
+ /*
+ * Only enable the error (IACCESS) interrupt. In PIO mode we'll
+ * poll the status of the FIFOs.
+ */
+ spfi_writel(spfi, SPFI_INTERRUPT_IACCESS, SPFI_INTERRUPT_ENABLE);
+
+ master->auto_runtime_pm = true;
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_TX_DUAL | SPI_RX_DUAL;
+ if (of_property_read_bool(spfi->dev->of_node, "img,supports-quad-mode"))
+ master->mode_bits |= SPI_TX_QUAD | SPI_RX_QUAD;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(8);
+ master->max_speed_hz = clk_get_rate(spfi->spfi_clk) / 4;
+ master->min_speed_hz = clk_get_rate(spfi->spfi_clk) / 512;
+
+ master->setup = img_spfi_setup;
+ master->cleanup = img_spfi_cleanup;
+ master->transfer_one = img_spfi_transfer_one;
+ master->prepare_message = img_spfi_prepare;
+ master->unprepare_message = img_spfi_unprepare;
+ master->handle_err = img_spfi_handle_err;
+
+ spfi->tx_ch = dma_request_slave_channel(spfi->dev, "tx");
+ spfi->rx_ch = dma_request_slave_channel(spfi->dev, "rx");
+ if (!spfi->tx_ch || !spfi->rx_ch) {
+ if (spfi->tx_ch)
+ dma_release_channel(spfi->tx_ch);
+ if (spfi->rx_ch)
+ dma_release_channel(spfi->rx_ch);
+ dev_warn(spfi->dev, "Failed to get DMA channels, falling back to PIO mode\n");
+ } else {
+ master->dma_tx = spfi->tx_ch;
+ master->dma_rx = spfi->rx_ch;
+ master->can_dma = img_spfi_can_dma;
+ }
+
+ pm_runtime_set_active(spfi->dev);
+ pm_runtime_enable(spfi->dev);
+
+ ret = devm_spi_register_master(spfi->dev, master);
+ if (ret)
+ goto disable_pm;
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(spfi->dev);
+ if (spfi->rx_ch)
+ dma_release_channel(spfi->rx_ch);
+ if (spfi->tx_ch)
+ dma_release_channel(spfi->tx_ch);
+ clk_disable_unprepare(spfi->spfi_clk);
+disable_pclk:
+ clk_disable_unprepare(spfi->sys_clk);
+put_spi:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int img_spfi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+
+ if (spfi->tx_ch)
+ dma_release_channel(spfi->tx_ch);
+ if (spfi->rx_ch)
+ dma_release_channel(spfi->rx_ch);
+
+ pm_runtime_disable(spfi->dev);
+ if (!pm_runtime_status_suspended(spfi->dev)) {
+ clk_disable_unprepare(spfi->spfi_clk);
+ clk_disable_unprepare(spfi->sys_clk);
+ }
+
+ spi_master_put(master);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int img_spfi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spfi->spfi_clk);
+ clk_disable_unprepare(spfi->sys_clk);
+
+ return 0;
+}
+
+static int img_spfi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(spfi->sys_clk);
+ if (ret)
+ return ret;
+ ret = clk_prepare_enable(spfi->spfi_clk);
+ if (ret) {
+ clk_disable_unprepare(spfi->sys_clk);
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int img_spfi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int img_spfi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct img_spfi *spfi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret)
+ return ret;
+ spfi_reset(spfi);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static const struct dev_pm_ops img_spfi_pm_ops = {
+ SET_RUNTIME_PM_OPS(img_spfi_runtime_suspend, img_spfi_runtime_resume,
+ NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(img_spfi_suspend, img_spfi_resume)
+};
+
+static const struct of_device_id img_spfi_of_match[] = {
+ { .compatible = "img,spfi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, img_spfi_of_match);
+
+static struct platform_driver img_spfi_driver = {
+ .driver = {
+ .name = "img-spfi",
+ .pm = &img_spfi_pm_ops,
+ .of_match_table = of_match_ptr(img_spfi_of_match),
+ },
+ .probe = img_spfi_probe,
+ .remove = img_spfi_remove,
+};
+module_platform_driver(img_spfi_driver);
+
+MODULE_DESCRIPTION("IMG SPFI controller driver");
+MODULE_AUTHOR("Andrew Bresticker <abrestic@chromium.org>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
new file mode 100644
index 000000000..f08e812b2
--- /dev/null
+++ b/drivers/spi/spi-imx.c
@@ -0,0 +1,1249 @@
+/*
+ * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
+ * Copyright (C) 2008 Juergen Beisert
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation
+ * 51 Franklin Street, Fifth Floor
+ * Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+
+#include <linux/platform_data/dma-imx.h>
+#include <linux/platform_data/spi-imx.h>
+
+#define DRIVER_NAME "spi_imx"
+
+#define MXC_CSPIRXDATA 0x00
+#define MXC_CSPITXDATA 0x04
+#define MXC_CSPICTRL 0x08
+#define MXC_CSPIINT 0x0c
+#define MXC_RESET 0x1c
+
+/* generic defines to abstract from the different register layouts */
+#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
+#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
+
+/* The maximum number of bytes that an SDMA BD can transfer. */
+#define MAX_SDMA_BD_BYTES (1 << 15)
+#define IMX_DMA_TIMEOUT (msecs_to_jiffies(3000))
+struct spi_imx_config {
+ unsigned int speed_hz;
+ unsigned int bpw;
+ unsigned int mode;
+ u8 cs;
+};
+
+enum spi_imx_devtype {
+ IMX1_CSPI,
+ IMX21_CSPI,
+ IMX27_CSPI,
+ IMX31_CSPI,
+ IMX35_CSPI, /* CSPI on all i.mx except above */
+ IMX51_ECSPI, /* ECSPI on i.mx51 and later */
+};
+
+struct spi_imx_data;
+
+struct spi_imx_devtype_data {
+ void (*intctrl)(struct spi_imx_data *, int);
+ int (*config)(struct spi_imx_data *, struct spi_imx_config *);
+ void (*trigger)(struct spi_imx_data *);
+ int (*rx_available)(struct spi_imx_data *);
+ void (*reset)(struct spi_imx_data *);
+ enum spi_imx_devtype devtype;
+};
+
+struct spi_imx_data {
+ struct spi_bitbang bitbang;
+
+ struct completion xfer_done;
+ void __iomem *base;
+ struct clk *clk_per;
+ struct clk *clk_ipg;
+ unsigned long spi_clk;
+
+ unsigned int count;
+ void (*tx)(struct spi_imx_data *);
+ void (*rx)(struct spi_imx_data *);
+ void *rx_buf;
+ const void *tx_buf;
+ unsigned int txfifo; /* number of words pushed in tx FIFO */
+
+ /* DMA */
+ unsigned int dma_is_inited;
+ unsigned int dma_finished;
+ bool usedma;
+ u32 rx_wml;
+ u32 tx_wml;
+ u32 rxt_wml;
+ struct completion dma_rx_completion;
+ struct completion dma_tx_completion;
+
+ const struct spi_imx_devtype_data *devtype_data;
+ int chipselect[0];
+};
+
+static inline int is_imx27_cspi(struct spi_imx_data *d)
+{
+ return d->devtype_data->devtype == IMX27_CSPI;
+}
+
+static inline int is_imx35_cspi(struct spi_imx_data *d)
+{
+ return d->devtype_data->devtype == IMX35_CSPI;
+}
+
+static inline unsigned spi_imx_get_fifosize(struct spi_imx_data *d)
+{
+ return (d->devtype_data->devtype == IMX51_ECSPI) ? 64 : 8;
+}
+
+#define MXC_SPI_BUF_RX(type) \
+static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
+{ \
+ unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \
+ \
+ if (spi_imx->rx_buf) { \
+ *(type *)spi_imx->rx_buf = val; \
+ spi_imx->rx_buf += sizeof(type); \
+ } \
+}
+
+#define MXC_SPI_BUF_TX(type) \
+static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \
+{ \
+ type val = 0; \
+ \
+ if (spi_imx->tx_buf) { \
+ val = *(type *)spi_imx->tx_buf; \
+ spi_imx->tx_buf += sizeof(type); \
+ } \
+ \
+ spi_imx->count -= sizeof(type); \
+ \
+ writel(val, spi_imx->base + MXC_CSPITXDATA); \
+}
+
+MXC_SPI_BUF_RX(u8)
+MXC_SPI_BUF_TX(u8)
+MXC_SPI_BUF_RX(u16)
+MXC_SPI_BUF_TX(u16)
+MXC_SPI_BUF_RX(u32)
+MXC_SPI_BUF_TX(u32)
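+
+/*
+ * The macros above stamp out per-width FIFO accessors: for example,
+ * MXC_SPI_BUF_RX(u8) defines spi_imx_buf_rx_u8(), which pops one word
+ * from the RX FIFO and, if a buffer is attached, stores its low 8 bits
+ * and advances the buffer pointer.
+ */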
+
+/* First entry is reserved, second entry is valid only if SDHC_SPIEN is set
+ * (which is currently not the case in this driver)
+ */
+static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
+ 256, 384, 512, 768, 1024};
+
+/* MX21, MX27 */
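+/*
+ * Worked example (assumed rates): fin = 48 MHz, fspi = 1 MHz, max = 18.
+ * The first entry with 1 MHz * mxc_clkdivs[i] >= 48 MHz is i = 9
+ * (divider 48), which yields exactly 1 MHz on the bus.
+ */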
+static unsigned int spi_imx_clkdiv_1(unsigned int fin,
+ unsigned int fspi, unsigned int max)
+{
+ int i;
+
+ for (i = 2; i < max; i++)
+ if (fspi * mxc_clkdivs[i] >= fin)
+ return i;
+
+ return max;
+}
+
+/* MX1, MX31, MX35, MX51 CSPI */
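+/*
+ * Worked example (assumed rates): fin = 48 MHz, fspi = 8 MHz. The loop
+ * tries divide ratios 4, 8, 16, ...; 8 MHz * 4 = 32 MHz is still below
+ * fin, but 8 MHz * 8 >= 48 MHz, so the function returns 1, i.e. divide
+ * by 8 for a 6 MHz bus clock.
+ */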
+static unsigned int spi_imx_clkdiv_2(unsigned int fin,
+ unsigned int fspi)
+{
+ int i, div = 4;
+
+ for (i = 0; i < 7; i++) {
+ if (fspi * div >= fin)
+ return i;
+ div <<= 1;
+ }
+
+ return 7;
+}
+
+static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml)
+ && (transfer->len > spi_imx->tx_wml))
+ return true;
+ return false;
+}
+
+#define MX51_ECSPI_CTRL 0x08
+#define MX51_ECSPI_CTRL_ENABLE (1 << 0)
+#define MX51_ECSPI_CTRL_XCH (1 << 2)
+#define MX51_ECSPI_CTRL_SMC (1 << 3)
+#define MX51_ECSPI_CTRL_MODE_MASK (0xf << 4)
+#define MX51_ECSPI_CTRL_POSTDIV_OFFSET 8
+#define MX51_ECSPI_CTRL_PREDIV_OFFSET 12
+#define MX51_ECSPI_CTRL_CS(cs) ((cs) << 18)
+#define MX51_ECSPI_CTRL_BL_OFFSET 20
+
+#define MX51_ECSPI_CONFIG 0x0c
+#define MX51_ECSPI_CONFIG_SCLKPHA(cs) (1 << ((cs) + 0))
+#define MX51_ECSPI_CONFIG_SCLKPOL(cs) (1 << ((cs) + 4))
+#define MX51_ECSPI_CONFIG_SBBCTRL(cs) (1 << ((cs) + 8))
+#define MX51_ECSPI_CONFIG_SSBPOL(cs) (1 << ((cs) + 12))
+#define MX51_ECSPI_CONFIG_SCLKCTL(cs) (1 << ((cs) + 20))
+
+#define MX51_ECSPI_INT 0x10
+#define MX51_ECSPI_INT_TEEN (1 << 0)
+#define MX51_ECSPI_INT_RREN (1 << 3)
+
+#define MX51_ECSPI_DMA 0x14
+#define MX51_ECSPI_DMA_TX_WML_OFFSET 0
+#define MX51_ECSPI_DMA_TX_WML_MASK 0x3F
+#define MX51_ECSPI_DMA_RX_WML_OFFSET 16
+#define MX51_ECSPI_DMA_RX_WML_MASK (0x3F << 16)
+#define MX51_ECSPI_DMA_RXT_WML_OFFSET 24
+#define MX51_ECSPI_DMA_RXT_WML_MASK (0x3F << 24)
+
+#define MX51_ECSPI_DMA_TEDEN_OFFSET 7
+#define MX51_ECSPI_DMA_RXDEN_OFFSET 23
+#define MX51_ECSPI_DMA_RXTDEN_OFFSET 31
+
+#define MX51_ECSPI_STAT 0x18
+#define MX51_ECSPI_STAT_RR (1 << 3)
+
+/* MX51 eCSPI */
+static unsigned int mx51_ecspi_clkdiv(unsigned int fin, unsigned int fspi,
+ unsigned int *fres)
+{
+ /*
+ * there are two 4-bit dividers, the pre-divider divides by
+ * $pre, the post-divider by 2^$post
+ */
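+	/*
+	 * Worked example (assumed rates): fin = 60 MHz, fspi = 20 MHz.
+	 * fls() gives post = 1, bumped to 2 because 60 MHz > 20 MHz << 1;
+	 * the max(4, post) - 4 adjustment then yields post = 0 and
+	 * pre = DIV_ROUND_UP(60M, 20M) - 1 = 2, i.e. divide by 3 for an
+	 * exact 20 MHz SCLK.
+	 */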
+ unsigned int pre, post;
+
+ if (unlikely(fspi > fin))
+ return 0;
+
+ post = fls(fin) - fls(fspi);
+ if (fin > fspi << post)
+ post++;
+
+ /* now we have: (fin <= fspi << post) with post being minimal */
+
+ post = max(4U, post) - 4;
+ if (unlikely(post > 0xf)) {
+ pr_err("%s: cannot set clock freq: %u (base freq: %u)\n",
+ __func__, fspi, fin);
+ return 0xff;
+ }
+
+ pre = DIV_ROUND_UP(fin, fspi << post) - 1;
+
+ pr_debug("%s: fin: %u, fspi: %u, post: %u, pre: %u\n",
+ __func__, fin, fspi, post, pre);
+
+ /* Resulting frequency for the SCLK line. */
+ *fres = (fin / (pre + 1)) >> post;
+
+ return (pre << MX51_ECSPI_CTRL_PREDIV_OFFSET) |
+ (post << MX51_ECSPI_CTRL_POSTDIV_OFFSET);
+}
+
+static void __maybe_unused mx51_ecspi_intctrl(struct spi_imx_data *spi_imx, int enable)
+{
+ unsigned val = 0;
+
+ if (enable & MXC_INT_TE)
+ val |= MX51_ECSPI_INT_TEEN;
+
+ if (enable & MXC_INT_RR)
+ val |= MX51_ECSPI_INT_RREN;
+
+ writel(val, spi_imx->base + MX51_ECSPI_INT);
+}
+
+static void __maybe_unused mx51_ecspi_trigger(struct spi_imx_data *spi_imx)
+{
+ u32 reg = readl(spi_imx->base + MX51_ECSPI_CTRL);
+
+ if (!spi_imx->usedma)
+ reg |= MX51_ECSPI_CTRL_XCH;
+ else if (!spi_imx->dma_finished)
+ reg |= MX51_ECSPI_CTRL_SMC;
+ else
+ reg &= ~MX51_ECSPI_CTRL_SMC;
+ writel(reg, spi_imx->base + MX51_ECSPI_CTRL);
+}
+
+static int __maybe_unused mx51_ecspi_config(struct spi_imx_data *spi_imx,
+ struct spi_imx_config *config)
+{
+ u32 ctrl = MX51_ECSPI_CTRL_ENABLE, cfg = 0, dma = 0;
+ u32 tx_wml_cfg, rx_wml_cfg, rxt_wml_cfg;
+ u32 clk = config->speed_hz, delay;
+
+ /*
+ * The hardware seems to have a race condition when changing modes. The
+ * current assumption is that the selection of the channel arrives
+ * earlier in the hardware than the mode bits when they are written at
+ * the same time.
+ * So set master mode for all channels as we do not support slave mode.
+ */
+ ctrl |= MX51_ECSPI_CTRL_MODE_MASK;
+
+ /* set clock speed */
+ ctrl |= mx51_ecspi_clkdiv(spi_imx->spi_clk, config->speed_hz, &clk);
+
+ /* set chip select to use */
+ ctrl |= MX51_ECSPI_CTRL_CS(config->cs);
+
+ ctrl |= (config->bpw - 1) << MX51_ECSPI_CTRL_BL_OFFSET;
+
+ cfg |= MX51_ECSPI_CONFIG_SBBCTRL(config->cs);
+
+ if (config->mode & SPI_CPHA)
+ cfg |= MX51_ECSPI_CONFIG_SCLKPHA(config->cs);
+
+ if (config->mode & SPI_CPOL) {
+ cfg |= MX51_ECSPI_CONFIG_SCLKPOL(config->cs);
+ cfg |= MX51_ECSPI_CONFIG_SCLKCTL(config->cs);
+ }
+ if (config->mode & SPI_CS_HIGH)
+ cfg |= MX51_ECSPI_CONFIG_SSBPOL(config->cs);
+
+ writel(ctrl, spi_imx->base + MX51_ECSPI_CTRL);
+ writel(cfg, spi_imx->base + MX51_ECSPI_CONFIG);
+
+ /*
+ * Wait until the changes in the configuration register CONFIGREG
+ * propagate into the hardware. It takes exactly one tick of the
+	 * SCLK clock, but we wait two SCLK ticks just to be sure. The
+	 * delay the hardware needs to apply the changes is noticeable
+	 * when SCLK runs very slowly (e.g. two ticks of a 100 kHz SCLK
+	 * take 20 us). In such a case, if the SCLK polarity is to be
+	 * inverted, the GPIO chipselect might be asserted before the
+	 * SCLK polarity changes, which would disrupt the SPI
+	 * communication as the device on the other end would already
+	 * interpret the change of SCLK polarity as a clock tick.
+ */
+ delay = (2 * 1000000) / clk;
+ if (likely(delay < 10)) /* SCLK is faster than 100 kHz */
+ udelay(delay);
+ else /* SCLK is _very_ slow */
+ usleep_range(delay, delay + 10);
+
+ /*
+ * Configure the DMA register: setup the watermark
+ * and enable DMA request.
+ */
+ if (spi_imx->dma_is_inited) {
+ dma = readl(spi_imx->base + MX51_ECSPI_DMA);
+
+ spi_imx->rxt_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ rx_wml_cfg = spi_imx->rx_wml << MX51_ECSPI_DMA_RX_WML_OFFSET;
+ tx_wml_cfg = spi_imx->tx_wml << MX51_ECSPI_DMA_TX_WML_OFFSET;
+ rxt_wml_cfg = spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET;
+ dma = (dma & ~MX51_ECSPI_DMA_TX_WML_MASK
+ & ~MX51_ECSPI_DMA_RX_WML_MASK
+ & ~MX51_ECSPI_DMA_RXT_WML_MASK)
+ | rx_wml_cfg | tx_wml_cfg | rxt_wml_cfg
+ |(1 << MX51_ECSPI_DMA_TEDEN_OFFSET)
+ |(1 << MX51_ECSPI_DMA_RXDEN_OFFSET)
+ |(1 << MX51_ECSPI_DMA_RXTDEN_OFFSET);
+
+ writel(dma, spi_imx->base + MX51_ECSPI_DMA);
+ }
+
+ return 0;
+}
+
+static int __maybe_unused mx51_ecspi_rx_available(struct spi_imx_data *spi_imx)
+{
+ return readl(spi_imx->base + MX51_ECSPI_STAT) & MX51_ECSPI_STAT_RR;
+}
+
+static void __maybe_unused mx51_ecspi_reset(struct spi_imx_data *spi_imx)
+{
+ /* drain receive buffer */
+ while (mx51_ecspi_rx_available(spi_imx))
+ readl(spi_imx->base + MXC_CSPIRXDATA);
+}
+
+#define MX31_INTREG_TEEN (1 << 0)
+#define MX31_INTREG_RREN (1 << 3)
+
+#define MX31_CSPICTRL_ENABLE (1 << 0)
+#define MX31_CSPICTRL_MASTER (1 << 1)
+#define MX31_CSPICTRL_XCH (1 << 2)
+#define MX31_CSPICTRL_POL (1 << 4)
+#define MX31_CSPICTRL_PHA (1 << 5)
+#define MX31_CSPICTRL_SSCTL (1 << 6)
+#define MX31_CSPICTRL_SSPOL (1 << 7)
+#define MX31_CSPICTRL_BC_SHIFT 8
+#define MX35_CSPICTRL_BL_SHIFT 20
+#define MX31_CSPICTRL_CS_SHIFT 24
+#define MX35_CSPICTRL_CS_SHIFT 12
+#define MX31_CSPICTRL_DR_SHIFT 16
+
+#define MX31_CSPISTATUS 0x14
+#define MX31_STATUS_RR (1 << 3)
+
+/* These functions also work for the i.MX35, but be aware that
+ * the i.MX35 has a slightly different register layout for bits
+ * we do not use here.
+ */
+static void __maybe_unused mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
+{
+ unsigned int val = 0;
+
+ if (enable & MXC_INT_TE)
+ val |= MX31_INTREG_TEEN;
+ if (enable & MXC_INT_RR)
+ val |= MX31_INTREG_RREN;
+
+ writel(val, spi_imx->base + MXC_CSPIINT);
+}
+
+static void __maybe_unused mx31_trigger(struct spi_imx_data *spi_imx)
+{
+ unsigned int reg;
+
+ reg = readl(spi_imx->base + MXC_CSPICTRL);
+ reg |= MX31_CSPICTRL_XCH;
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+}
+
+static int __maybe_unused mx31_config(struct spi_imx_data *spi_imx,
+ struct spi_imx_config *config)
+{
+ unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
+ int cs = spi_imx->chipselect[config->cs];
+
+ reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
+ MX31_CSPICTRL_DR_SHIFT;
+
+ if (is_imx35_cspi(spi_imx)) {
+ reg |= (config->bpw - 1) << MX35_CSPICTRL_BL_SHIFT;
+ reg |= MX31_CSPICTRL_SSCTL;
+ } else {
+ reg |= (config->bpw - 1) << MX31_CSPICTRL_BC_SHIFT;
+ }
+
+ if (config->mode & SPI_CPHA)
+ reg |= MX31_CSPICTRL_PHA;
+ if (config->mode & SPI_CPOL)
+ reg |= MX31_CSPICTRL_POL;
+ if (config->mode & SPI_CS_HIGH)
+ reg |= MX31_CSPICTRL_SSPOL;
+ if (cs < 0)
+ reg |= (cs + 32) <<
+ (is_imx35_cspi(spi_imx) ? MX35_CSPICTRL_CS_SHIFT :
+ MX31_CSPICTRL_CS_SHIFT);
+
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+
+ return 0;
+}
+
+static int __maybe_unused mx31_rx_available(struct spi_imx_data *spi_imx)
+{
+ return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
+}
+
+static void __maybe_unused mx31_reset(struct spi_imx_data *spi_imx)
+{
+ /* drain receive buffer */
+ while (readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR)
+ readl(spi_imx->base + MXC_CSPIRXDATA);
+}
+
+#define MX21_INTREG_RR (1 << 4)
+#define MX21_INTREG_TEEN (1 << 9)
+#define MX21_INTREG_RREN (1 << 13)
+
+#define MX21_CSPICTRL_POL (1 << 5)
+#define MX21_CSPICTRL_PHA (1 << 6)
+#define MX21_CSPICTRL_SSPOL (1 << 8)
+#define MX21_CSPICTRL_XCH (1 << 9)
+#define MX21_CSPICTRL_ENABLE (1 << 10)
+#define MX21_CSPICTRL_MASTER (1 << 11)
+#define MX21_CSPICTRL_DR_SHIFT 14
+#define MX21_CSPICTRL_CS_SHIFT 19
+
+static void __maybe_unused mx21_intctrl(struct spi_imx_data *spi_imx, int enable)
+{
+ unsigned int val = 0;
+
+ if (enable & MXC_INT_TE)
+ val |= MX21_INTREG_TEEN;
+ if (enable & MXC_INT_RR)
+ val |= MX21_INTREG_RREN;
+
+ writel(val, spi_imx->base + MXC_CSPIINT);
+}
+
+static void __maybe_unused mx21_trigger(struct spi_imx_data *spi_imx)
+{
+ unsigned int reg;
+
+ reg = readl(spi_imx->base + MXC_CSPICTRL);
+ reg |= MX21_CSPICTRL_XCH;
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+}
+
+static int __maybe_unused mx21_config(struct spi_imx_data *spi_imx,
+ struct spi_imx_config *config)
+{
+ unsigned int reg = MX21_CSPICTRL_ENABLE | MX21_CSPICTRL_MASTER;
+ int cs = spi_imx->chipselect[config->cs];
+ unsigned int max = is_imx27_cspi(spi_imx) ? 16 : 18;
+
+ reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz, max) <<
+ MX21_CSPICTRL_DR_SHIFT;
+ reg |= config->bpw - 1;
+
+ if (config->mode & SPI_CPHA)
+ reg |= MX21_CSPICTRL_PHA;
+ if (config->mode & SPI_CPOL)
+ reg |= MX21_CSPICTRL_POL;
+ if (config->mode & SPI_CS_HIGH)
+ reg |= MX21_CSPICTRL_SSPOL;
+ if (cs < 0)
+ reg |= (cs + 32) << MX21_CSPICTRL_CS_SHIFT;
+
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+
+ return 0;
+}
+
+static int __maybe_unused mx21_rx_available(struct spi_imx_data *spi_imx)
+{
+ return readl(spi_imx->base + MXC_CSPIINT) & MX21_INTREG_RR;
+}
+
+static void __maybe_unused mx21_reset(struct spi_imx_data *spi_imx)
+{
+ writel(1, spi_imx->base + MXC_RESET);
+}
+
+#define MX1_INTREG_RR (1 << 3)
+#define MX1_INTREG_TEEN (1 << 8)
+#define MX1_INTREG_RREN (1 << 11)
+
+#define MX1_CSPICTRL_POL (1 << 4)
+#define MX1_CSPICTRL_PHA (1 << 5)
+#define MX1_CSPICTRL_XCH (1 << 8)
+#define MX1_CSPICTRL_ENABLE (1 << 9)
+#define MX1_CSPICTRL_MASTER (1 << 10)
+#define MX1_CSPICTRL_DR_SHIFT 13
+
+static void __maybe_unused mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
+{
+ unsigned int val = 0;
+
+ if (enable & MXC_INT_TE)
+ val |= MX1_INTREG_TEEN;
+ if (enable & MXC_INT_RR)
+ val |= MX1_INTREG_RREN;
+
+ writel(val, spi_imx->base + MXC_CSPIINT);
+}
+
+static void __maybe_unused mx1_trigger(struct spi_imx_data *spi_imx)
+{
+ unsigned int reg;
+
+ reg = readl(spi_imx->base + MXC_CSPICTRL);
+ reg |= MX1_CSPICTRL_XCH;
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+}
+
+static int __maybe_unused mx1_config(struct spi_imx_data *spi_imx,
+ struct spi_imx_config *config)
+{
+ unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
+
+ reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
+ MX1_CSPICTRL_DR_SHIFT;
+ reg |= config->bpw - 1;
+
+ if (config->mode & SPI_CPHA)
+ reg |= MX1_CSPICTRL_PHA;
+ if (config->mode & SPI_CPOL)
+ reg |= MX1_CSPICTRL_POL;
+
+ writel(reg, spi_imx->base + MXC_CSPICTRL);
+
+ return 0;
+}
+
+static int __maybe_unused mx1_rx_available(struct spi_imx_data *spi_imx)
+{
+ return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
+}
+
+static void __maybe_unused mx1_reset(struct spi_imx_data *spi_imx)
+{
+ writel(1, spi_imx->base + MXC_RESET);
+}
+
+static struct spi_imx_devtype_data imx1_cspi_devtype_data = {
+ .intctrl = mx1_intctrl,
+ .config = mx1_config,
+ .trigger = mx1_trigger,
+ .rx_available = mx1_rx_available,
+ .reset = mx1_reset,
+ .devtype = IMX1_CSPI,
+};
+
+static struct spi_imx_devtype_data imx21_cspi_devtype_data = {
+ .intctrl = mx21_intctrl,
+ .config = mx21_config,
+ .trigger = mx21_trigger,
+ .rx_available = mx21_rx_available,
+ .reset = mx21_reset,
+ .devtype = IMX21_CSPI,
+};
+
+static struct spi_imx_devtype_data imx27_cspi_devtype_data = {
+	/* the i.mx27 cspi shares its functions with the i.mx21 one */
+ .intctrl = mx21_intctrl,
+ .config = mx21_config,
+ .trigger = mx21_trigger,
+ .rx_available = mx21_rx_available,
+ .reset = mx21_reset,
+ .devtype = IMX27_CSPI,
+};
+
+static struct spi_imx_devtype_data imx31_cspi_devtype_data = {
+ .intctrl = mx31_intctrl,
+ .config = mx31_config,
+ .trigger = mx31_trigger,
+ .rx_available = mx31_rx_available,
+ .reset = mx31_reset,
+ .devtype = IMX31_CSPI,
+};
+
+static struct spi_imx_devtype_data imx35_cspi_devtype_data = {
+	/* the i.mx35 and later cspi share their functions with the i.mx31 one */
+ .intctrl = mx31_intctrl,
+ .config = mx31_config,
+ .trigger = mx31_trigger,
+ .rx_available = mx31_rx_available,
+ .reset = mx31_reset,
+ .devtype = IMX35_CSPI,
+};
+
+static struct spi_imx_devtype_data imx51_ecspi_devtype_data = {
+ .intctrl = mx51_ecspi_intctrl,
+ .config = mx51_ecspi_config,
+ .trigger = mx51_ecspi_trigger,
+ .rx_available = mx51_ecspi_rx_available,
+ .reset = mx51_ecspi_reset,
+ .devtype = IMX51_ECSPI,
+};
+
+static struct platform_device_id spi_imx_devtype[] = {
+ {
+ .name = "imx1-cspi",
+ .driver_data = (kernel_ulong_t) &imx1_cspi_devtype_data,
+ }, {
+ .name = "imx21-cspi",
+ .driver_data = (kernel_ulong_t) &imx21_cspi_devtype_data,
+ }, {
+ .name = "imx27-cspi",
+ .driver_data = (kernel_ulong_t) &imx27_cspi_devtype_data,
+ }, {
+ .name = "imx31-cspi",
+ .driver_data = (kernel_ulong_t) &imx31_cspi_devtype_data,
+ }, {
+ .name = "imx35-cspi",
+ .driver_data = (kernel_ulong_t) &imx35_cspi_devtype_data,
+ }, {
+ .name = "imx51-ecspi",
+ .driver_data = (kernel_ulong_t) &imx51_ecspi_devtype_data,
+ }, {
+ /* sentinel */
+ }
+};
+
+static const struct of_device_id spi_imx_dt_ids[] = {
+ { .compatible = "fsl,imx1-cspi", .data = &imx1_cspi_devtype_data, },
+ { .compatible = "fsl,imx21-cspi", .data = &imx21_cspi_devtype_data, },
+ { .compatible = "fsl,imx27-cspi", .data = &imx27_cspi_devtype_data, },
+ { .compatible = "fsl,imx31-cspi", .data = &imx31_cspi_devtype_data, },
+ { .compatible = "fsl,imx35-cspi", .data = &imx35_cspi_devtype_data, },
+ { .compatible = "fsl,imx51-ecspi", .data = &imx51_ecspi_devtype_data, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, spi_imx_dt_ids);
+
+static void spi_imx_chipselect(struct spi_device *spi, int is_active)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ int gpio = spi_imx->chipselect[spi->chip_select];
+ int active = is_active != BITBANG_CS_INACTIVE;
+ int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);
+
+ if (!gpio_is_valid(gpio))
+ return;
+
+ gpio_set_value(gpio, dev_is_lowactive ^ active);
+}
+
+static void spi_imx_push(struct spi_imx_data *spi_imx)
+{
+ while (spi_imx->txfifo < spi_imx_get_fifosize(spi_imx)) {
+ if (!spi_imx->count)
+ break;
+ spi_imx->tx(spi_imx);
+ spi_imx->txfifo++;
+ }
+
+ spi_imx->devtype_data->trigger(spi_imx);
+}
+
+static irqreturn_t spi_imx_isr(int irq, void *dev_id)
+{
+ struct spi_imx_data *spi_imx = dev_id;
+
+ while (spi_imx->devtype_data->rx_available(spi_imx)) {
+ spi_imx->rx(spi_imx);
+ spi_imx->txfifo--;
+ }
+
+ if (spi_imx->count) {
+ spi_imx_push(spi_imx);
+ return IRQ_HANDLED;
+ }
+
+ if (spi_imx->txfifo) {
+ /* No data left to push, but still waiting for rx data,
+ * enable receive data available interrupt.
+ */
+ spi_imx->devtype_data->intctrl(
+ spi_imx, MXC_INT_RR);
+ return IRQ_HANDLED;
+ }
+
+ spi_imx->devtype_data->intctrl(spi_imx, 0);
+ complete(&spi_imx->xfer_done);
+
+ return IRQ_HANDLED;
+}
+
+static int spi_imx_setupxfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ struct spi_imx_config config;
+
+ config.bpw = t ? t->bits_per_word : spi->bits_per_word;
+ config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
+ config.mode = spi->mode;
+ config.cs = spi->chip_select;
+
+ if (!config.speed_hz)
+ config.speed_hz = spi->max_speed_hz;
+ if (!config.bpw)
+ config.bpw = spi->bits_per_word;
+
+ /* Initialize the functions for transfer */
+ if (config.bpw <= 8) {
+ spi_imx->rx = spi_imx_buf_rx_u8;
+ spi_imx->tx = spi_imx_buf_tx_u8;
+ } else if (config.bpw <= 16) {
+ spi_imx->rx = spi_imx_buf_rx_u16;
+ spi_imx->tx = spi_imx_buf_tx_u16;
+ } else {
+ spi_imx->rx = spi_imx_buf_rx_u32;
+ spi_imx->tx = spi_imx_buf_tx_u32;
+ }
+
+ spi_imx->devtype_data->config(spi_imx, &config);
+
+ return 0;
+}
+
+static void spi_imx_sdma_exit(struct spi_imx_data *spi_imx)
+{
+ struct spi_master *master = spi_imx->bitbang.master;
+
+ if (master->dma_rx) {
+ dma_release_channel(master->dma_rx);
+ master->dma_rx = NULL;
+ }
+
+ if (master->dma_tx) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ }
+
+ spi_imx->dma_is_inited = 0;
+}
+
+static int spi_imx_sdma_init(struct device *dev, struct spi_imx_data *spi_imx,
+ struct spi_master *master,
+ const struct resource *res)
+{
+ struct dma_slave_config slave_config = {};
+ int ret;
+
+	/* use PIO mode on the i.mx6dl chip (known issue TKT238285) */
+ if (of_machine_is_compatible("fsl,imx6dl"))
+ return 0;
+
+ /* Prepare for TX DMA: */
+ master->dma_tx = dma_request_slave_channel(dev, "tx");
+ if (!master->dma_tx) {
+ dev_err(dev, "cannot get the TX DMA channel!\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ slave_config.direction = DMA_MEM_TO_DEV;
+ slave_config.dst_addr = res->start + MXC_CSPITXDATA;
+ slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.dst_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
+ ret = dmaengine_slave_config(master->dma_tx, &slave_config);
+ if (ret) {
+ dev_err(dev, "error in TX dma configuration.\n");
+ goto err;
+ }
+
+ /* Prepare for RX : */
+ master->dma_rx = dma_request_slave_channel(dev, "rx");
+ if (!master->dma_rx) {
+ dev_dbg(dev, "cannot get the DMA channel.\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ slave_config.direction = DMA_DEV_TO_MEM;
+ slave_config.src_addr = res->start + MXC_CSPIRXDATA;
+ slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ slave_config.src_maxburst = spi_imx_get_fifosize(spi_imx) / 2;
+ ret = dmaengine_slave_config(master->dma_rx, &slave_config);
+ if (ret) {
+ dev_err(dev, "error in RX dma configuration.\n");
+ goto err;
+ }
+
+ init_completion(&spi_imx->dma_rx_completion);
+ init_completion(&spi_imx->dma_tx_completion);
+ master->can_dma = spi_imx_can_dma;
+ master->max_dma_len = MAX_SDMA_BD_BYTES;
+ spi_imx->bitbang.master->flags = SPI_MASTER_MUST_RX |
+ SPI_MASTER_MUST_TX;
+ spi_imx->tx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ spi_imx->rx_wml = spi_imx_get_fifosize(spi_imx) / 2;
+ spi_imx->dma_is_inited = 1;
+
+ return 0;
+err:
+ spi_imx_sdma_exit(spi_imx);
+ return ret;
+}
+
+static void spi_imx_dma_rx_callback(void *cookie)
+{
+ struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
+
+ complete(&spi_imx->dma_rx_completion);
+}
+
+static void spi_imx_dma_tx_callback(void *cookie)
+{
+ struct spi_imx_data *spi_imx = (struct spi_imx_data *)cookie;
+
+ complete(&spi_imx->dma_tx_completion);
+}
+
+static int spi_imx_dma_transfer(struct spi_imx_data *spi_imx,
+ struct spi_transfer *transfer)
+{
+ struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+ int ret;
+ unsigned long timeout;
+ u32 dma;
+ int left;
+ struct spi_master *master = spi_imx->bitbang.master;
+ struct sg_table *tx = &transfer->tx_sg, *rx = &transfer->rx_sg;
+
+ if (tx) {
+ desc_tx = dmaengine_prep_slave_sg(master->dma_tx,
+ tx->sgl, tx->nents, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx)
+ goto no_dma;
+
+ desc_tx->callback = spi_imx_dma_tx_callback;
+ desc_tx->callback_param = (void *)spi_imx;
+ dmaengine_submit(desc_tx);
+ }
+
+ if (rx) {
+ desc_rx = dmaengine_prep_slave_sg(master->dma_rx,
+ rx->sgl, rx->nents, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx)
+ goto no_dma;
+
+ desc_rx->callback = spi_imx_dma_rx_callback;
+ desc_rx->callback_param = (void *)spi_imx;
+ dmaengine_submit(desc_rx);
+ }
+
+ reinit_completion(&spi_imx->dma_rx_completion);
+ reinit_completion(&spi_imx->dma_tx_completion);
+
+ /* Trigger the cspi module. */
+ spi_imx->dma_finished = 0;
+
+ dma = readl(spi_imx->base + MX51_ECSPI_DMA);
+ dma = dma & (~MX51_ECSPI_DMA_RXT_WML_MASK);
+	/* Lower the RXT watermark so the DMA fetches the tail data */
+ left = transfer->len % spi_imx->rxt_wml;
+ if (left)
+ writel(dma | (left << MX51_ECSPI_DMA_RXT_WML_OFFSET),
+ spi_imx->base + MX51_ECSPI_DMA);
+ spi_imx->devtype_data->trigger(spi_imx);
+
+ dma_async_issue_pending(master->dma_tx);
+ dma_async_issue_pending(master->dma_rx);
+	/* Wait for the SDMA to finish the data transfer. */
+ timeout = wait_for_completion_timeout(&spi_imx->dma_tx_completion,
+ IMX_DMA_TIMEOUT);
+ if (!timeout) {
+ pr_warn("%s %s: I/O Error in DMA TX\n",
+ dev_driver_string(&master->dev),
+ dev_name(&master->dev));
+ dmaengine_terminate_all(master->dma_tx);
+ } else {
+ timeout = wait_for_completion_timeout(
+ &spi_imx->dma_rx_completion, IMX_DMA_TIMEOUT);
+ if (!timeout) {
+ pr_warn("%s %s: I/O Error in DMA RX\n",
+ dev_driver_string(&master->dev),
+ dev_name(&master->dev));
+ spi_imx->devtype_data->reset(spi_imx);
+ dmaengine_terminate_all(master->dma_rx);
+ }
+ writel(dma |
+ spi_imx->rxt_wml << MX51_ECSPI_DMA_RXT_WML_OFFSET,
+ spi_imx->base + MX51_ECSPI_DMA);
+ }
+
+ spi_imx->dma_finished = 1;
+ spi_imx->devtype_data->trigger(spi_imx);
+
+ if (!timeout)
+ ret = -ETIMEDOUT;
+ else
+ ret = transfer->len;
+
+ return ret;
+
+no_dma:
+ pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
+ dev_driver_string(&master->dev),
+ dev_name(&master->dev));
+ return -EAGAIN;
+}
+
+static int spi_imx_pio_transfer(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+
+ spi_imx->tx_buf = transfer->tx_buf;
+ spi_imx->rx_buf = transfer->rx_buf;
+ spi_imx->count = transfer->len;
+ spi_imx->txfifo = 0;
+
+ reinit_completion(&spi_imx->xfer_done);
+
+ spi_imx_push(spi_imx);
+
+ spi_imx->devtype_data->intctrl(spi_imx, MXC_INT_TE);
+
+ wait_for_completion(&spi_imx->xfer_done);
+
+ return transfer->len;
+}
+
+static int spi_imx_transfer(struct spi_device *spi,
+ struct spi_transfer *transfer)
+{
+ int ret;
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+
+ if (spi_imx->bitbang.master->can_dma &&
+ spi_imx_can_dma(spi_imx->bitbang.master, spi, transfer)) {
+ spi_imx->usedma = true;
+ ret = spi_imx_dma_transfer(spi_imx, transfer);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+ spi_imx->usedma = false;
+
+ return spi_imx_pio_transfer(spi, transfer);
+}
+
+static int spi_imx_setup(struct spi_device *spi)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
+ int gpio = spi_imx->chipselect[spi->chip_select];
+
+ dev_dbg(&spi->dev, "%s: mode %d, %u bpw, %d hz\n", __func__,
+ spi->mode, spi->bits_per_word, spi->max_speed_hz);
+
+ if (gpio_is_valid(gpio))
+ gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
+
+ spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);
+
+ return 0;
+}
+
+static void spi_imx_cleanup(struct spi_device *spi)
+{
+}
+
+static int
+spi_imx_prepare_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_enable(spi_imx->clk_per);
+ if (ret)
+ return ret;
+
+ ret = clk_enable(spi_imx->clk_ipg);
+ if (ret) {
+ clk_disable(spi_imx->clk_per);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+spi_imx_unprepare_message(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ clk_disable(spi_imx->clk_ipg);
+ clk_disable(spi_imx->clk_per);
+ return 0;
+}
+
+static int spi_imx_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ const struct of_device_id *of_id =
+ of_match_device(spi_imx_dt_ids, &pdev->dev);
+ struct spi_imx_master *mxc_platform_info =
+ dev_get_platdata(&pdev->dev);
+ struct spi_master *master;
+ struct spi_imx_data *spi_imx;
+ struct resource *res;
+ int i, ret, num_cs, irq;
+
+ if (!np && !mxc_platform_info) {
+ dev_err(&pdev->dev, "can't get the platform data\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
+ if (ret < 0) {
+ if (mxc_platform_info)
+ num_cs = mxc_platform_info->num_chipselect;
+ else
+ return ret;
+ }
+
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 32);
+ master->bus_num = pdev->id;
+ master->num_chipselect = num_cs;
+
+ spi_imx = spi_master_get_devdata(master);
+ spi_imx->bitbang.master = master;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+ if (!gpio_is_valid(cs_gpio) && mxc_platform_info)
+ cs_gpio = mxc_platform_info->chipselect[i];
+
+ spi_imx->chipselect[i] = cs_gpio;
+ if (!gpio_is_valid(cs_gpio))
+ continue;
+
+ ret = devm_gpio_request(&pdev->dev, spi_imx->chipselect[i],
+ DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get cs gpios\n");
+ goto out_master_put;
+ }
+ }
+
+ spi_imx->bitbang.chipselect = spi_imx_chipselect;
+ spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
+ spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
+ spi_imx->bitbang.master->setup = spi_imx_setup;
+ spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
+ spi_imx->bitbang.master->prepare_message = spi_imx_prepare_message;
+ spi_imx->bitbang.master->unprepare_message = spi_imx_unprepare_message;
+ spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ init_completion(&spi_imx->xfer_done);
+
+ spi_imx->devtype_data = of_id ? of_id->data :
+ (struct spi_imx_devtype_data *) pdev->id_entry->driver_data;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi_imx->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spi_imx->base)) {
+ ret = PTR_ERR(spi_imx->base);
+ goto out_master_put;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto out_master_put;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, spi_imx_isr, 0,
+ dev_name(&pdev->dev), spi_imx);
+ if (ret) {
+ dev_err(&pdev->dev, "can't get irq%d: %d\n", irq, ret);
+ goto out_master_put;
+ }
+
+ spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+ if (IS_ERR(spi_imx->clk_ipg)) {
+ ret = PTR_ERR(spi_imx->clk_ipg);
+ goto out_master_put;
+ }
+
+ spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
+ if (IS_ERR(spi_imx->clk_per)) {
+ ret = PTR_ERR(spi_imx->clk_per);
+ goto out_master_put;
+ }
+
+ ret = clk_prepare_enable(spi_imx->clk_per);
+ if (ret)
+ goto out_master_put;
+
+ ret = clk_prepare_enable(spi_imx->clk_ipg);
+ if (ret)
+ goto out_put_per;
+
+ spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
+ /*
+	 * Only validated on i.mx6 so far; the constraint can be removed
+	 * once it is validated on other chips.
+ */
+ if (spi_imx->devtype_data == &imx51_ecspi_devtype_data
+ && spi_imx_sdma_init(&pdev->dev, spi_imx, master, res))
+ dev_err(&pdev->dev, "dma setup error,use pio instead\n");
+
+ spi_imx->devtype_data->reset(spi_imx);
+
+ spi_imx->devtype_data->intctrl(spi_imx, 0);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = spi_bitbang_start(&spi_imx->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
+ goto out_clk_put;
+ }
+
+ dev_info(&pdev->dev, "probed\n");
+
+ clk_disable(spi_imx->clk_ipg);
+ clk_disable(spi_imx->clk_per);
+ return ret;
+
+out_clk_put:
+ clk_disable_unprepare(spi_imx->clk_ipg);
+out_put_per:
+ clk_disable_unprepare(spi_imx->clk_per);
+out_master_put:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int spi_imx_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&spi_imx->bitbang);
+
+ writel(0, spi_imx->base + MXC_CSPICTRL);
+ clk_unprepare(spi_imx->clk_ipg);
+ clk_unprepare(spi_imx->clk_per);
+ spi_imx_sdma_exit(spi_imx);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static struct platform_driver spi_imx_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = spi_imx_dt_ids,
+ },
+ .id_table = spi_imx_devtype,
+ .probe = spi_imx_probe,
+ .remove = spi_imx_remove,
+};
+module_platform_driver(spi_imx_driver);
+
+MODULE_DESCRIPTION("SPI Master Controller driver");
+MODULE_AUTHOR("Sascha Hauer, Pengutronix");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/spi/spi-lm70llp.c b/drivers/spi/spi-lm70llp.c
new file mode 100644
index 000000000..ba72347cb
--- /dev/null
+++ b/drivers/spi/spi-lm70llp.c
@@ -0,0 +1,344 @@
+/*
+ * Driver for LM70EVAL-LLP board for the LM70 sensor
+ *
+ * Copyright (C) 2006 Kaiwan N Billimoria <kaiwan@designergraphix.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/parport.h>
+#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+
+/*
+ * The LM70 communicates with a host processor using a 3-wire variant of
+ * the SPI/Microwire bus interface. This driver specifically supports an
+ * NS LM70 LLP Evaluation Board, interfacing to a PC using its parallel
+ * port to bitbang an SPI-parport bridge. Accordingly, this is an SPI
+ * master controller driver. The hwmon/lm70 driver is a "SPI protocol
+ * driver", layered on top of this one and usable without the lm70llp.
+ *
+ * Datasheet and Schematic:
+ * The LM70 is a temperature sensor chip from National Semiconductor; its
+ * datasheet is available at http://www.national.com/pf/LM/LM70.html
+ * The schematic for this particular board (the LM70EVAL-LLP) is
+ * available (on page 4) here:
+ * http://www.national.com/appinfo/tempsensors/files/LM70LLPEVALmanual.pdf
+ *
+ * Also see Documentation/spi/spi-lm70llp. The SPI<->parport code here is
+ * (heavily) based on spi-butterfly by David Brownell.
+ *
+ * The LM70 LLP connects to the PC parallel port in the following manner:
+ *
+ * Parallel LM70 LLP
+ * Port Direction JP2 Header
+ * ----------- --------- ------------
+ * D0 2 - -
+ * D1 3 --> V+ 5
+ * D2 4 --> V+ 5
+ * D3 5 --> V+ 5
+ * D4 6 --> V+ 5
+ * D5 7 --> nCS 8
+ * D6 8 --> SCLK 3
+ * D7 9 --> SI/O 5
+ * GND 25 - GND 7
+ * Select 13 <-- SI/O 1
+ *
+ * Note that parport pin 13 actually gets inverted by the transistor
+ * arrangement which lets either the parport or the LM70 drive the
+ * SI/SO signal (see the schematic for details).
+ */
+
+#define DRVNAME "spi-lm70llp"
+
+#define lm70_INIT 0xBE
+#define SIO 0x10
+#define nCS 0x20
+#define SCLK 0x40
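+
+/*
+ * Reading of the init value (based on the pin mapping above): 0xBE
+ * drives D1-D4 high to power the board, leaves nCS (D5) high, i.e.
+ * deasserted, keeps SCLK (D6) low, and raises D7 so the LM70 may
+ * drive the shared SI/O line.
+ */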
+
+/*-------------------------------------------------------------------------*/
+
+struct spi_lm70llp {
+ struct spi_bitbang bitbang;
+ struct parport *port;
+ struct pardevice *pd;
+ struct spi_device *spidev_lm70;
+ struct spi_board_info info;
+ //struct device *dev;
+};
+
+/* REVISIT : ugly global ; provides "exclusive open" facility */
+static struct spi_lm70llp *lm70llp;
+
+
+/*-------------------------------------------------------------------*/
+
+static inline struct spi_lm70llp *spidev_to_pp(struct spi_device *spi)
+{
+ return spi->controller_data;
+}
+
+/*---------------------- LM70 LLP eval board-specific inlines follow */
+
+/* NOTE: we don't actually need to reread the output values, since they'll
+ * still be what we wrote before. Plus, going through parport builds in
+ * a ~1ms/operation delay; these SPI transfers could easily be faster.
+ */
+
+static inline void deassertCS(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+
+ data &= ~0x80; /* pull D7/SI-out low while de-asserted */
+ parport_write_data(pp->port, data | nCS);
+}
+
+static inline void assertCS(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+
+ data |= 0x80; /* pull D7/SI-out high so lm70 drives SO-in */
+ parport_write_data(pp->port, data & ~nCS);
+}
+
+static inline void clkHigh(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+ parport_write_data(pp->port, data | SCLK);
+}
+
+static inline void clkLow(struct spi_lm70llp *pp)
+{
+ u8 data = parport_read_data(pp->port);
+ parport_write_data(pp->port, data & ~SCLK);
+}
+
+/*------------------------- SPI-LM70-specific inlines ----------------------*/
+
+static inline void spidelay(unsigned d)
+{
+ udelay(d);
+}
+
+static inline void setsck(struct spi_device *s, int is_on)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(s);
+
+ if (is_on)
+ clkHigh(pp);
+ else
+ clkLow(pp);
+}
+
+static inline void setmosi(struct spi_device *s, int is_on)
+{
+ /* FIXME update D7 ... this way we can put the chip
+ * into shutdown mode and read the manufacturer ID,
+ * but we can't put it back into operational mode.
+ */
+}
+
+/*
+ * getmiso:
+ * Why do we return 0 when the SIO line is high and vice-versa?
+ * The fact is, the lm70 eval board from NS (which this driver drives)
+ * is wired in just such a way: when the lm70's SIO goes high, a transistor
+ * switches the parport line (pin 13) low, and vice-versa.
+ */
+static inline int getmiso(struct spi_device *s)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(s);
+	return ((SIO == (parport_read_status(pp->port) & SIO)) ? 0 : 1);
+}
+/*--------------------------------------------------------------------*/
+
+#include "spi-bitbang-txrx.h"
+
+static void lm70_chipselect(struct spi_device *spi, int value)
+{
+ struct spi_lm70llp *pp = spidev_to_pp(spi);
+
+ if (value)
+ assertCS(pp);
+ else
+ deassertCS(pp);
+}
+
+/*
+ * Our actual bitbanger routine.
+ */
+static u32 lm70_txrx(struct spi_device *spi, unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+}
+
+static void spi_lm70llp_attach(struct parport *p)
+{
+ struct pardevice *pd;
+ struct spi_lm70llp *pp;
+ struct spi_master *master;
+ int status;
+
+ if (lm70llp) {
+ printk(KERN_WARNING
+ "%s: spi_lm70llp instance already loaded. Aborting.\n",
+ DRVNAME);
+ return;
+ }
+
+	/* TODO: this just _assumes_ an lm70 is there ... no probe;
+	 * the lm70 driver could verify it by reading the manufacturer ID.
+ */
+
+ master = spi_alloc_master(p->physport->dev, sizeof *pp);
+ if (!master) {
+ status = -ENOMEM;
+ goto out_fail;
+ }
+ pp = spi_master_get_devdata(master);
+
+ /*
+ * SPI and bitbang hookup.
+ */
+ pp->bitbang.master = master;
+ pp->bitbang.chipselect = lm70_chipselect;
+ pp->bitbang.txrx_word[SPI_MODE_0] = lm70_txrx;
+ pp->bitbang.flags = SPI_3WIRE;
+
+ /*
+ * Parport hookup
+ */
+ pp->port = p;
+ pd = parport_register_device(p, DRVNAME,
+ NULL, NULL, NULL,
+ PARPORT_FLAG_EXCL, pp);
+ if (!pd) {
+ status = -ENOMEM;
+ goto out_free_master;
+ }
+ pp->pd = pd;
+
+ status = parport_claim(pd);
+ if (status < 0)
+ goto out_parport_unreg;
+
+ /*
+ * Start SPI ...
+ */
+ status = spi_bitbang_start(&pp->bitbang);
+ if (status < 0) {
+ printk(KERN_WARNING
+ "%s: spi_bitbang_start failed with status %d\n",
+ DRVNAME, status);
+ goto out_off_and_release;
+ }
+
+ /*
+ * The modalias name MUST match the device_driver name
+ * for the bus glue code to match and subsequently bind them.
+ * We are binding to the generic drivers/hwmon/lm70.c device
+ * driver.
+ */
+ strcpy(pp->info.modalias, "lm70");
+ pp->info.max_speed_hz = 6 * 1000 * 1000;
+ pp->info.chip_select = 0;
+ pp->info.mode = SPI_3WIRE | SPI_MODE_0;
+
+ /* power up the chip, and let the LM70 control SI/SO */
+ parport_write_data(pp->port, lm70_INIT);
+
+ /* Enable access to our primary data structure via
+ * the board info's (void *)controller_data.
+ */
+ pp->info.controller_data = pp;
+ pp->spidev_lm70 = spi_new_device(pp->bitbang.master, &pp->info);
+ if (pp->spidev_lm70)
+ dev_dbg(&pp->spidev_lm70->dev, "spidev_lm70 at %s\n",
+ dev_name(&pp->spidev_lm70->dev));
+ else {
+ printk(KERN_WARNING "%s: spi_new_device failed\n", DRVNAME);
+ status = -ENODEV;
+ goto out_bitbang_stop;
+ }
+ pp->spidev_lm70->bits_per_word = 8;
+
+ lm70llp = pp;
+ return;
+
+out_bitbang_stop:
+ spi_bitbang_stop(&pp->bitbang);
+out_off_and_release:
+ /* power down */
+ parport_write_data(pp->port, 0);
+ mdelay(10);
+ parport_release(pp->pd);
+out_parport_unreg:
+ parport_unregister_device(pd);
+out_free_master:
+ (void) spi_master_put(master);
+out_fail:
+ pr_info("%s: spi_lm70llp probe fail, status %d\n", DRVNAME, status);
+}
+
+static void spi_lm70llp_detach(struct parport *p)
+{
+ struct spi_lm70llp *pp;
+
+ if (!lm70llp || lm70llp->port != p)
+ return;
+
+ pp = lm70llp;
+ spi_bitbang_stop(&pp->bitbang);
+
+ /* power down */
+ parport_write_data(pp->port, 0);
+
+ parport_release(pp->pd);
+ parport_unregister_device(pp->pd);
+
+ (void) spi_master_put(pp->bitbang.master);
+
+ lm70llp = NULL;
+}
+
+
+static struct parport_driver spi_lm70llp_drv = {
+ .name = DRVNAME,
+ .attach = spi_lm70llp_attach,
+ .detach = spi_lm70llp_detach,
+};
+
+static int __init init_spi_lm70llp(void)
+{
+ return parport_register_driver(&spi_lm70llp_drv);
+}
+module_init(init_spi_lm70llp);
+
+static void __exit cleanup_spi_lm70llp(void)
+{
+ parport_unregister_driver(&spi_lm70llp_drv);
+}
+module_exit(cleanup_spi_lm70llp);
+
+MODULE_AUTHOR("Kaiwan N Billimoria <kaiwan@designergraphix.com>");
+MODULE_DESCRIPTION(
+ "Parport adapter for the National Semiconductor LM70 LLP eval board");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-meson-spifc.c b/drivers/spi/spi-meson-spifc.c
new file mode 100644
index 000000000..5468fc70d
--- /dev/null
+++ b/drivers/spi/spi-meson-spifc.c
@@ -0,0 +1,462 @@
+/*
+ * Driver for Amlogic Meson SPI flash controller (SPIFC)
+ *
+ * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * version 2 as published by the Free Software Foundation.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/spi/spi.h>
+#include <linux/types.h>
+
+/* register map */
+#define REG_CMD 0x00
+#define REG_ADDR 0x04
+#define REG_CTRL 0x08
+#define REG_CTRL1 0x0c
+#define REG_STATUS 0x10
+#define REG_CTRL2 0x14
+#define REG_CLOCK 0x18
+#define REG_USER 0x1c
+#define REG_USER1 0x20
+#define REG_USER2 0x24
+#define REG_USER3 0x28
+#define REG_USER4 0x2c
+#define REG_SLAVE 0x30
+#define REG_SLAVE1 0x34
+#define REG_SLAVE2 0x38
+#define REG_SLAVE3 0x3c
+#define REG_C0 0x40
+#define REG_B8 0x60
+#define REG_MAX 0x7c
+
+/* register fields */
+#define CMD_USER BIT(18)
+#define CTRL_ENABLE_AHB BIT(17)
+#define CLOCK_SOURCE BIT(31)
+#define CLOCK_DIV_SHIFT 12
+#define CLOCK_DIV_MASK (0x3f << CLOCK_DIV_SHIFT)
+#define CLOCK_CNT_HIGH_SHIFT 6
+#define CLOCK_CNT_HIGH_MASK (0x3f << CLOCK_CNT_HIGH_SHIFT)
+#define CLOCK_CNT_LOW_SHIFT 0
+#define CLOCK_CNT_LOW_MASK (0x3f << CLOCK_CNT_LOW_SHIFT)
+#define USER_DIN_EN_MS BIT(0)
+#define USER_CMP_MODE BIT(2)
+#define USER_UC_DOUT_SEL BIT(27)
+#define USER_UC_DIN_SEL BIT(28)
+#define USER_UC_MASK ((BIT(5) - 1) << 27)
+#define USER1_BN_UC_DOUT_SHIFT 17
+#define USER1_BN_UC_DOUT_MASK (0xff << 16)
+#define USER1_BN_UC_DIN_SHIFT 8
+#define USER1_BN_UC_DIN_MASK (0xff << 8)
+#define USER4_CS_ACT BIT(30)
+#define SLAVE_TRST_DONE BIT(4)
+#define SLAVE_OP_MODE BIT(30)
+#define SLAVE_SW_RST BIT(31)
+
+#define SPIFC_BUFFER_SIZE 64
+
+/**
+ * struct meson_spifc
+ * @master: the SPI master
+ * @regmap: regmap for device registers
+ * @clk: input clock of the built-in baud rate generator
+ * @dev: the device structure
+ */
+struct meson_spifc {
+ struct spi_master *master;
+ struct regmap *regmap;
+ struct clk *clk;
+ struct device *dev;
+};
+
+static const struct regmap_config spifc_regmap_config = {
+ .reg_bits = 32,
+ .val_bits = 32,
+ .reg_stride = 4,
+ .max_register = REG_MAX,
+};
+
+/**
+ * meson_spifc_wait_ready() - wait for the current operation to terminate
+ * @spifc: the Meson SPI device
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_spifc_wait_ready(struct meson_spifc *spifc)
+{
+ unsigned long deadline = jiffies + msecs_to_jiffies(5);
+ u32 data;
+
+ do {
+ regmap_read(spifc->regmap, REG_SLAVE, &data);
+ if (data & SLAVE_TRST_DONE)
+ return 0;
+ cond_resched();
+ } while (!time_after(jiffies, deadline));
+
+ return -ETIMEDOUT;
+}
+
+/**
+ * meson_spifc_drain_buffer() - copy data from device buffer to memory
+ * @spifc: the Meson SPI device
+ * @buf: the destination buffer
+ * @len: number of bytes to copy
+ */
+static void meson_spifc_drain_buffer(struct meson_spifc *spifc, u8 *buf,
+ int len)
+{
+ u32 data;
+ int i = 0;
+
+ while (i < len) {
+ regmap_read(spifc->regmap, REG_C0 + i, &data);
+
+ if (len - i >= 4) {
+ *((u32 *)buf) = data;
+ buf += 4;
+ } else {
+ memcpy(buf, &data, len - i);
+ break;
+ }
+ i += 4;
+ }
+}
+
+/**
+ * meson_spifc_fill_buffer() - copy data from memory to device buffer
+ * @spifc: the Meson SPI device
+ * @buf: the source buffer
+ * @len: number of bytes to copy
+ */
+static void meson_spifc_fill_buffer(struct meson_spifc *spifc, const u8 *buf,
+ int len)
+{
+ u32 data;
+ int i = 0;
+
+ while (i < len) {
+ if (len - i >= 4)
+ data = *(u32 *)buf;
+ else
+ memcpy(&data, buf, len - i);
+
+ regmap_write(spifc->regmap, REG_C0 + i, data);
+
+ buf += 4;
+ i += 4;
+ }
+}
+
+/**
+ * meson_spifc_setup_speed() - program the clock divider
+ * @spifc: the Meson SPI device
+ * @speed: desired speed in Hz
+ */
+static void meson_spifc_setup_speed(struct meson_spifc *spifc, u32 speed)
+{
+ unsigned long parent, value;
+ int n;
+
+ parent = clk_get_rate(spifc->clk);
+ n = max_t(int, parent / speed - 1, 1);
+
+ dev_dbg(spifc->dev, "parent %lu, speed %u, n %d\n", parent,
+ speed, n);
+
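+ /*
+ * Worked example with hypothetical rates: a 24 MHz parent clock
+ * and a 1 MHz target give n = 23, so the divider field and the
+ * low count are both 23 and the high count is 24 / 2 - 1 = 11.
+ */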
+ value = (n << CLOCK_DIV_SHIFT) & CLOCK_DIV_MASK;
+ value |= (n << CLOCK_CNT_LOW_SHIFT) & CLOCK_CNT_LOW_MASK;
+ value |= (((n + 1) / 2 - 1) << CLOCK_CNT_HIGH_SHIFT) &
+ CLOCK_CNT_HIGH_MASK;
+
+ regmap_write(spifc->regmap, REG_CLOCK, value);
+}
+
+/**
+ * meson_spifc_txrx() - transfer a chunk of data
+ * @spifc: the Meson SPI device
+ * @xfer: the current SPI transfer
+ * @offset: offset of the data to transfer
+ * @len: length of the data to transfer
+ * @last_xfer: whether this is the last transfer of the message
+ * @last_chunk: whether this is the last chunk of the transfer
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_spifc_txrx(struct meson_spifc *spifc,
+ struct spi_transfer *xfer,
+ int offset, int len, bool last_xfer,
+ bool last_chunk)
+{
+ bool keep_cs = true;
+ int ret;
+
+ if (xfer->tx_buf)
+ meson_spifc_fill_buffer(spifc, xfer->tx_buf + offset, len);
+
+ /* enable DOUT stage */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_UC_MASK,
+ USER_UC_DOUT_SEL);
+ regmap_write(spifc->regmap, REG_USER1,
+ (8 * len - 1) << USER1_BN_UC_DOUT_SHIFT);
+
+ /* enable data input during DOUT */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_DIN_EN_MS,
+ USER_DIN_EN_MS);
+
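+ /*
+ * On the last chunk of the message's final transfer, CS is
+ * normally released unless cs_change asks to keep it; on the
+ * last chunk of an intermediate transfer, cs_change instead
+ * requests a CS toggle, matching the spi_transfer semantics.
+ */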
+ if (last_chunk) {
+ if (last_xfer)
+ keep_cs = xfer->cs_change;
+ else
+ keep_cs = !xfer->cs_change;
+ }
+
+ regmap_update_bits(spifc->regmap, REG_USER4, USER4_CS_ACT,
+ keep_cs ? USER4_CS_ACT : 0);
+
+ /* clear transition done bit */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_TRST_DONE, 0);
+ /* start transfer */
+ regmap_update_bits(spifc->regmap, REG_CMD, CMD_USER, CMD_USER);
+
+ ret = meson_spifc_wait_ready(spifc);
+
+ if (!ret && xfer->rx_buf)
+ meson_spifc_drain_buffer(spifc, xfer->rx_buf + offset, len);
+
+ return ret;
+}
+
+/**
+ * meson_spifc_transfer_one() - perform a single transfer
+ * @master: the SPI master
+ * @spi: the SPI device
+ * @xfer: the current SPI transfer
+ * Return: 0 on success, a negative value on error
+ */
+static int meson_spifc_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+ int len, done = 0, ret = 0;
+
+ meson_spifc_setup_speed(spifc, xfer->speed_hz);
+
+ regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB, 0);
+
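+ /*
+ * Feed the transfer through the 64-byte data buffer one chunk
+ * at a time; e.g. a 100-byte transfer becomes a 64-byte chunk
+ * followed by a 36-byte one.
+ */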
+ while (done < xfer->len && !ret) {
+ len = min_t(int, xfer->len - done, SPIFC_BUFFER_SIZE);
+ ret = meson_spifc_txrx(spifc, xfer, done, len,
+ spi_transfer_is_last(master, xfer),
+ done + len >= xfer->len);
+ done += len;
+ }
+
+ regmap_update_bits(spifc->regmap, REG_CTRL, CTRL_ENABLE_AHB,
+ CTRL_ENABLE_AHB);
+
+ return ret;
+}
+
+/**
+ * meson_spifc_hw_init() - reset and initialize the SPI controller
+ * @spifc: the Meson SPI device
+ */
+static void meson_spifc_hw_init(struct meson_spifc *spifc)
+{
+ /* reset device */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_SW_RST,
+ SLAVE_SW_RST);
+ /* disable compatible mode */
+ regmap_update_bits(spifc->regmap, REG_USER, USER_CMP_MODE, 0);
+ /* set master mode */
+ regmap_update_bits(spifc->regmap, REG_SLAVE, SLAVE_OP_MODE, 0);
+}
+
+static int meson_spifc_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct meson_spifc *spifc;
+ struct resource *res;
+ void __iomem *base;
+ unsigned int rate;
+ int ret = 0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct meson_spifc));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ spifc = spi_master_get_devdata(master);
+ spifc->dev = &pdev->dev;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(spifc->dev, res);
+ if (IS_ERR(base)) {
+ ret = PTR_ERR(base);
+ goto out_err;
+ }
+
+ spifc->regmap = devm_regmap_init_mmio(spifc->dev, base,
+ &spifc_regmap_config);
+ if (IS_ERR(spifc->regmap)) {
+ ret = PTR_ERR(spifc->regmap);
+ goto out_err;
+ }
+
+ spifc->clk = devm_clk_get(spifc->dev, NULL);
+ if (IS_ERR(spifc->clk)) {
+ dev_err(spifc->dev, "missing clock\n");
+ ret = PTR_ERR(spifc->clk);
+ goto out_err;
+ }
+
+ ret = clk_prepare_enable(spifc->clk);
+ if (ret) {
+ dev_err(spifc->dev, "can't prepare clock\n");
+ goto out_err;
+ }
+
+ rate = clk_get_rate(spifc->clk);
+
+ master->num_chipselect = 1;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->auto_runtime_pm = true;
+ master->transfer_one = meson_spifc_transfer_one;
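+ /* the 6-bit clock divider spans parent/64 up to parent/2 */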
+ master->min_speed_hz = rate >> 6;
+ master->max_speed_hz = rate >> 1;
+
+ meson_spifc_hw_init(spifc);
+
+ pm_runtime_set_active(spifc->dev);
+ pm_runtime_enable(spifc->dev);
+
+ ret = devm_spi_register_master(spifc->dev, master);
+ if (ret) {
+ dev_err(spifc->dev, "failed to register spi master\n");
+ goto out_clk;
+ }
+
+ return 0;
+out_clk:
+ clk_disable_unprepare(spifc->clk);
+ pm_runtime_disable(spifc->dev);
+out_err:
+ spi_master_put(master);
+ return ret;
+}
+
+static int meson_spifc_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(&pdev->dev);
+ clk_disable_unprepare(spifc->clk);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int meson_spifc_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev))
+ clk_disable_unprepare(spifc->clk);
+
+ return 0;
+}
+
+static int meson_spifc_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+ int ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = clk_prepare_enable(spifc->clk);
+ if (ret)
+ return ret;
+ }
+
+ meson_spifc_hw_init(spifc);
+
+ ret = spi_master_resume(master);
+ if (ret)
+ clk_disable_unprepare(spifc->clk);
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int meson_spifc_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spifc->clk);
+
+ return 0;
+}
+
+static int meson_spifc_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct meson_spifc *spifc = spi_master_get_devdata(master);
+
+ return clk_prepare_enable(spifc->clk);
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops meson_spifc_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(meson_spifc_suspend, meson_spifc_resume)
+ SET_RUNTIME_PM_OPS(meson_spifc_runtime_suspend,
+ meson_spifc_runtime_resume,
+ NULL)
+};
+
+static const struct of_device_id meson_spifc_dt_match[] = {
+ { .compatible = "amlogic,meson6-spifc", },
+ { },
+};
+
+static struct platform_driver meson_spifc_driver = {
+ .probe = meson_spifc_probe,
+ .remove = meson_spifc_remove,
+ .driver = {
+ .name = "meson-spifc",
+ .of_match_table = of_match_ptr(meson_spifc_dt_match),
+ .pm = &meson_spifc_pm_ops,
+ },
+};
+
+module_platform_driver(meson_spifc_driver);
+
+MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
+MODULE_DESCRIPTION("Amlogic Meson SPIFC driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-mpc512x-psc.c b/drivers/spi/spi-mpc512x-psc.c
new file mode 100644
index 000000000..965d2bdcf
--- /dev/null
+++ b/drivers/spi/spi-mpc512x-psc.c
@@ -0,0 +1,610 @@
+/*
+ * MPC512x PSC in SPI mode driver.
+ *
+ * Copyright (C) 2007,2008 Freescale Semiconductor Inc.
+ * Original port from 52xx driver:
+ * Hongjun Chen <hong-jun.chen@freescale.com>
+ *
+ * Fork of mpc52xx_psc_spi.c:
+ * Copyright (C) 2006 TOPTICA Photonics AG., Dragos Carp
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/fsl_devices.h>
+#include <linux/gpio.h>
+#include <asm/mpc52xx_psc.h>
+
+struct mpc512x_psc_spi {
+ void (*cs_control)(struct spi_device *spi, bool on);
+
+ /* driver internal data */
+ struct mpc52xx_psc __iomem *psc;
+ struct mpc512x_psc_fifo __iomem *fifo;
+ unsigned int irq;
+ u8 bits_per_word;
+ struct clk *clk_mclk;
+ struct clk *clk_ipg;
+ u32 mclk_rate;
+
+ struct completion txisrdone;
+};
+
+/* controller state */
+struct mpc512x_psc_spi_cs {
+ int bits_per_word;
+ int speed_hz;
+};
+
+/* set clock freq, clock ramp, bits per word
+ * if t is NULL then reset the values to the default values
+ */
+static int mpc512x_psc_spi_transfer_setup(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc512x_psc_spi_cs *cs = spi->controller_state;
+
+ cs->speed_hz = (t && t->speed_hz)
+ ? t->speed_hz : spi->max_speed_hz;
+ cs->bits_per_word = (t && t->bits_per_word)
+ ? t->bits_per_word : spi->bits_per_word;
+ cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8;
+ return 0;
+}
+
+static void mpc512x_psc_spi_activate_cs(struct spi_device *spi)
+{
+ struct mpc512x_psc_spi_cs *cs = spi->controller_state;
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ u32 sicr;
+ u32 ccr;
+ int speed;
+ u16 bclkdiv;
+
+ sicr = in_be32(&psc->sicr);
+
+ /* Set clock phase and polarity */
+ if (spi->mode & SPI_CPHA)
+ sicr |= 0x00001000;
+ else
+ sicr &= ~0x00001000;
+
+ if (spi->mode & SPI_CPOL)
+ sicr |= 0x00002000;
+ else
+ sicr &= ~0x00002000;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ sicr |= 0x10000000;
+ else
+ sicr &= ~0x10000000;
+ out_be32(&psc->sicr, sicr);
+
+ ccr = in_be32(&psc->ccr);
+ ccr &= 0xFF000000;
+ speed = cs->speed_hz;
+ if (!speed)
+ speed = 1000000; /* default 1MHz */
+ bclkdiv = (mps->mclk_rate / speed) - 1;
+
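+ /*
+ * e.g. a hypothetical 66 MHz MCLK and a 1 MHz target give
+ * bclkdiv = 65 = 0x0041; the low byte 0x41 lands in CCR[23:16]
+ * and the high byte 0x00 in CCR[15:8] below.
+ */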
+ ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
+ out_be32(&psc->ccr, ccr);
+ mps->bits_per_word = cs->bits_per_word;
+
+ if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+}
+
+static void mpc512x_psc_spi_deactivate_cs(struct spi_device *spi)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+
+ if (mps->cs_control && gpio_is_valid(spi->cs_gpio))
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+}
+
+/* extract and scale size field in txsz or rxsz */
+#define MPC512x_PSC_FIFO_SZ(sz) (((sz) & 0x7ff) << 2)
+
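+/* tx_len value while the final TX byte of a transfer is still pending */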
+#define EOFBYTE 1
+
+static int mpc512x_psc_spi_transfer_rxtx(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+ size_t tx_len = t->len;
+ size_t rx_len = t->len;
+ u8 *tx_buf = (u8 *)t->tx_buf;
+ u8 *rx_buf = (u8 *)t->rx_buf;
+
+ if (!tx_buf && !rx_buf && t->len)
+ return -EINVAL;
+
+ while (rx_len || tx_len) {
+ size_t txcount;
+ u8 data;
+ size_t fifosz;
+ size_t rxcount;
+ int rxtries;
+
+ /*
+ * send the TX bytes in as large a chunk as possible
+ * but neither exceed the TX nor the RX FIFOs
+ */
+ fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->txsz));
+ txcount = min(fifosz, tx_len);
+ fifosz = MPC512x_PSC_FIFO_SZ(in_be32(&fifo->rxsz));
+ fifosz -= in_be32(&fifo->rxcnt) + 1;
+ txcount = min(fifosz, txcount);
+ if (txcount) {
+ /* fill the TX FIFO */
+ while (txcount-- > 0) {
+ data = tx_buf ? *tx_buf++ : 0;
+ if (tx_len == EOFBYTE && t->cs_change)
+ setbits32(&fifo->txcmd,
+ MPC512x_PSC_FIFO_EOF);
+ out_8(&fifo->txdata_8, data);
+ tx_len--;
+ }
+
+ /* have the ISR trigger when the TX FIFO is empty */
+ reinit_completion(&mps->txisrdone);
+ out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
+ out_be32(&fifo->tximr, MPC512x_PSC_FIFO_EMPTY);
+ wait_for_completion(&mps->txisrdone);
+ }
+
+ /*
+ * consume as much RX data as the FIFO holds, while we
+ * iterate over the transfer's TX data length
+ *
+ * only insist on draining all the remaining RX bytes
+ * when the TX bytes were exhausted (that's at the very
+ * end of this transfer, not when still iterating over
+ * the transfer's chunks)
+ */
+ rxtries = 50;
+ do {
+ /*
+ * grab whatever was in the FIFO when we started
+ * looking, don't bother fetching what was added to
+ * the FIFO while we read from it -- we'll return
+ * here eventually and prefer sending out remaining
+ * TX data
+ */
+ fifosz = in_be32(&fifo->rxcnt);
+ rxcount = min(fifosz, rx_len);
+ while (rxcount-- > 0) {
+ data = in_8(&fifo->rxdata_8);
+ if (rx_buf)
+ *rx_buf++ = data;
+ rx_len--;
+ }
+
+ /*
+ * come back later if there still is TX data to send,
+ * bail out of the RX drain loop if all of the TX data
+ * was sent and all of the RX data was received (i.e.
+ * when the transmission has completed)
+ */
+ if (tx_len)
+ break;
+ if (!rx_len)
+ break;
+
+ /*
+ * TX data transmission has completed while RX data
+ * is still pending -- that's a transient situation
+ * which depends on wire speed and specific
+ * hardware implementation details (buffering) yet
+ * should resolve very quickly
+ *
+ * just yield for a moment to not hog the CPU for
+ * too long when running SPI at low speed
+ *
+ * the timeout range is rather arbitrary and tries
+ * to balance throughput against system load; the
+ * chosen values result in a minimal timeout of 50
+ * times 10us and thus work at speeds as low as
+ * some 20kbps, while the maximum timeout at the
+ * transfer's end could be 5ms _if_ nothing else
+ * ticks in the system _and_ RX data still wasn't
+ * received, which only occurs in situations that
+ * are exceptional; removing the unpredictability
+ * of the timeout either decreases throughput
+ * (longer timeouts), or puts more load on the
+ * system (fixed short timeouts) or requires the
+ * use of a timeout API instead of a counter and an
+ * unknown inner delay
+ */
+ usleep_range(10, 100);
+
+ } while (--rxtries > 0);
+ if (!tx_len && rx_len && !rxtries) {
+ /*
+ * not enough RX bytes even after several retries
+ * and the resulting rather long timeout?
+ */
+ rxcount = in_be32(&fifo->rxcnt);
+ dev_warn(&spi->dev,
+ "short xfer, missing %zd RX bytes, FIFO level %zd\n",
+ rx_len, rxcount);
+ }
+
+ /*
+ * drain and drop RX data which "should not be there" in
+ * the first place; for an undisturbed transmission this turns
+ * into a NOP (except for the FIFO level fetch)
+ */
+ if (!tx_len && !rx_len) {
+ while (in_be32(&fifo->rxcnt))
+ in_8(&fifo->rxdata_8);
+ }
+ }
+ return 0;
+}
+
+static int mpc512x_psc_spi_msg_xfer(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct spi_device *spi;
+ unsigned cs_change;
+ int status;
+ struct spi_transfer *t;
+
+ spi = m->spi;
+ cs_change = 1;
+ status = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
+ status = mpc512x_psc_spi_transfer_setup(spi, t);
+ if (status < 0)
+ break;
+ }
+
+ if (cs_change)
+ mpc512x_psc_spi_activate_cs(spi);
+ cs_change = t->cs_change;
+
+ status = mpc512x_psc_spi_transfer_rxtx(spi, t);
+ if (status)
+ break;
+ m->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change)
+ mpc512x_psc_spi_deactivate_cs(spi);
+ }
+
+ m->status = status;
+ if (m->complete)
+ m->complete(m->context);
+
+ if (status || !cs_change)
+ mpc512x_psc_spi_deactivate_cs(spi);
+
+ mpc512x_psc_spi_transfer_setup(spi, NULL);
+
+ spi_finalize_current_message(master);
+ return status;
+}
+
+static int mpc512x_psc_spi_prep_xfer_hw(struct spi_master *master)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+
+ dev_dbg(&master->dev, "%s()\n", __func__);
+
+ /* Zero MR2 */
+ in_8(&psc->mode);
+ out_8(&psc->mode, 0x0);
+
+ /* enable transmitter/receiver */
+ out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+
+ return 0;
+}
+
+static int mpc512x_psc_spi_unprep_xfer_hw(struct spi_master *master)
+{
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+
+ dev_dbg(&master->dev, "%s()\n", __func__);
+
+ /* disable transmitter/receiver and fifo interrupt */
+ out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+ out_be32(&fifo->tximr, 0);
+
+ return 0;
+}
+
+static int mpc512x_psc_spi_setup(struct spi_device *spi)
+{
+ struct mpc512x_psc_spi_cs *cs = spi->controller_state;
+ int ret;
+
+ if (spi->bits_per_word % 8)
+ return -EINVAL;
+
+ if (!cs) {
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+
+ if (gpio_is_valid(spi->cs_gpio)) {
+ ret = gpio_request(spi->cs_gpio, dev_name(&spi->dev));
+ if (ret) {
+ dev_err(&spi->dev, "can't get CS gpio: %d\n",
+ ret);
+ kfree(cs);
+ return ret;
+ }
+ gpio_direction_output(spi->cs_gpio,
+ spi->mode & SPI_CS_HIGH ? 0 : 1);
+ }
+
+ spi->controller_state = cs;
+ }
+
+ cs->bits_per_word = spi->bits_per_word;
+ cs->speed_hz = spi->max_speed_hz;
+
+ return 0;
+}
+
+static void mpc512x_psc_spi_cleanup(struct spi_device *spi)
+{
+ if (gpio_is_valid(spi->cs_gpio))
+ gpio_free(spi->cs_gpio);
+ kfree(spi->controller_state);
+}
+
+static int mpc512x_psc_spi_port_config(struct spi_master *master,
+ struct mpc512x_psc_spi *mps)
+{
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+ u32 sicr;
+ u32 ccr;
+ int speed;
+ u16 bclkdiv;
+
+ /* Reset the PSC into a known state */
+ out_8(&psc->command, MPC52xx_PSC_RST_RX);
+ out_8(&psc->command, MPC52xx_PSC_RST_TX);
+ out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+
+ /* Disable PSC interrupts; all useful interrupts are in the FIFO */
+ out_be16(&psc->isr_imr.imr, 0);
+
+ /* Disable fifo interrupts, will be enabled later */
+ out_be32(&fifo->tximr, 0);
+ out_be32(&fifo->rximr, 0);
+
+ /* Setup fifo slice address and size */
+ /*out_be32(&fifo->txsz, 0x0fe00004);*/
+ /*out_be32(&fifo->rxsz, 0x0ff00004);*/
+
+ sicr = 0x01000000 | /* SIM = 0001 -- 8 bit */
+ 0x00800000 | /* GenClk = 1 -- internal clk */
+ 0x00008000 | /* SPI = 1 */
+ 0x00004000 | /* MSTR = 1 -- SPI master */
+ 0x00000800; /* UseEOF = 1 -- SS low until EOF */
+
+ out_be32(&psc->sicr, sicr);
+
+ ccr = in_be32(&psc->ccr);
+ ccr &= 0xFF000000;
+ speed = 1000000; /* default 1MHz */
+ bclkdiv = (mps->mclk_rate / speed) - 1;
+ ccr |= (((bclkdiv & 0xff) << 16) | (((bclkdiv >> 8) & 0xff) << 8));
+ out_be32(&psc->ccr, ccr);
+
+ /* Set 2ms DTL delay */
+ out_8(&psc->ctur, 0x00);
+ out_8(&psc->ctlr, 0x82);
+
+ /* we don't use the alarms */
+ out_be32(&fifo->rxalarm, 0xfff);
+ out_be32(&fifo->txalarm, 0);
+
+ /* Enable FIFO slices for Rx/Tx */
+ out_be32(&fifo->rxcmd,
+ MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA);
+ out_be32(&fifo->txcmd,
+ MPC512x_PSC_FIFO_ENABLE_SLICE | MPC512x_PSC_FIFO_ENABLE_DMA);
+
+ mps->bits_per_word = 8;
+
+ return 0;
+}
+
+static irqreturn_t mpc512x_psc_spi_isr(int irq, void *dev_id)
+{
+ struct mpc512x_psc_spi *mps = (struct mpc512x_psc_spi *)dev_id;
+ struct mpc512x_psc_fifo __iomem *fifo = mps->fifo;
+
+ /* clear interrupt and wake up the rx/tx routine */
+ if (in_be32(&fifo->txisr) &
+ in_be32(&fifo->tximr) & MPC512x_PSC_FIFO_EMPTY) {
+ out_be32(&fifo->txisr, MPC512x_PSC_FIFO_EMPTY);
+ out_be32(&fifo->tximr, 0);
+ complete(&mps->txisrdone);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+static void mpc512x_spi_cs_control(struct spi_device *spi, bool onoff)
+{
+ gpio_set_value(spi->cs_gpio, onoff);
+}
+
+static int mpc512x_psc_spi_do_probe(struct device *dev, u32 regaddr,
+ u32 size, unsigned int irq)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc512x_psc_spi *mps;
+ struct spi_master *master;
+ int ret;
+ void *tempp;
+ struct clk *clk;
+
+ master = spi_alloc_master(dev, sizeof *mps);
+ if (master == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, master);
+ mps = spi_master_get_devdata(master);
+ mps->irq = irq;
+
+ if (pdata == NULL) {
+ mps->cs_control = mpc512x_spi_cs_control;
+ } else {
+ mps->cs_control = pdata->cs_control;
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->max_chipselect;
+ }
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->setup = mpc512x_psc_spi_setup;
+ master->prepare_transfer_hardware = mpc512x_psc_spi_prep_xfer_hw;
+ master->transfer_one_message = mpc512x_psc_spi_msg_xfer;
+ master->unprepare_transfer_hardware = mpc512x_psc_spi_unprep_xfer_hw;
+ master->cleanup = mpc512x_psc_spi_cleanup;
+ master->dev.of_node = dev->of_node;
+
+ tempp = devm_ioremap(dev, regaddr, size);
+ if (!tempp) {
+ dev_err(dev, "could not ioremap I/O port range\n");
+ ret = -EFAULT;
+ goto free_master;
+ }
+ mps->psc = tempp;
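+ /* the MPC5121 FIFO registers directly follow the PSC register block */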
+ mps->fifo =
+ (struct mpc512x_psc_fifo *)(tempp + sizeof(struct mpc52xx_psc));
+ ret = devm_request_irq(dev, mps->irq, mpc512x_psc_spi_isr, IRQF_SHARED,
+ "mpc512x-psc-spi", mps);
+ if (ret)
+ goto free_master;
+ init_completion(&mps->txisrdone);
+
+ clk = devm_clk_get(dev, "mclk");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto free_master;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto free_master;
+ mps->clk_mclk = clk;
+ mps->mclk_rate = clk_get_rate(clk);
+
+ clk = devm_clk_get(dev, "ipg");
+ if (IS_ERR(clk)) {
+ ret = PTR_ERR(clk);
+ goto free_mclk_clock;
+ }
+ ret = clk_prepare_enable(clk);
+ if (ret)
+ goto free_mclk_clock;
+ mps->clk_ipg = clk;
+
+ ret = mpc512x_psc_spi_port_config(master, mps);
+ if (ret < 0)
+ goto free_ipg_clock;
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret < 0)
+ goto free_ipg_clock;
+
+ return ret;
+
+free_ipg_clock:
+ clk_disable_unprepare(mps->clk_ipg);
+free_mclk_clock:
+ clk_disable_unprepare(mps->clk_mclk);
+free_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int mpc512x_psc_spi_do_remove(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct mpc512x_psc_spi *mps = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(mps->clk_mclk);
+ clk_disable_unprepare(mps->clk_ipg);
+
+ return 0;
+}
+
+static int mpc512x_psc_spi_of_probe(struct platform_device *op)
+{
+ const u32 *regaddr_p;
+ u64 regaddr64, size64;
+
+ regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
+ if (!regaddr_p) {
+ dev_err(&op->dev, "Invalid PSC address\n");
+ return -EINVAL;
+ }
+ regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
+
+ return mpc512x_psc_spi_do_probe(&op->dev, (u32) regaddr64, (u32) size64,
+ irq_of_parse_and_map(op->dev.of_node, 0));
+}
+
+static int mpc512x_psc_spi_of_remove(struct platform_device *op)
+{
+ return mpc512x_psc_spi_do_remove(&op->dev);
+}
+
+static const struct of_device_id mpc512x_psc_spi_of_match[] = {
+ { .compatible = "fsl,mpc5121-psc-spi", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, mpc512x_psc_spi_of_match);
+
+static struct platform_driver mpc512x_psc_spi_of_driver = {
+ .probe = mpc512x_psc_spi_of_probe,
+ .remove = mpc512x_psc_spi_of_remove,
+ .driver = {
+ .name = "mpc512x-psc-spi",
+ .of_match_table = mpc512x_psc_spi_of_match,
+ },
+};
+module_platform_driver(mpc512x_psc_spi_of_driver);
+
+MODULE_AUTHOR("John Rigby");
+MODULE_DESCRIPTION("MPC512x PSC SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mpc52xx-psc.c b/drivers/spi/spi-mpc52xx-psc.c
new file mode 100644
index 000000000..72d11ebef
--- /dev/null
+++ b/drivers/spi/spi-mpc52xx-psc.c
@@ -0,0 +1,518 @@
+/*
+ * MPC52xx PSC in SPI mode driver.
+ *
+ * Maintainer: Dragos Carp
+ *
+ * Copyright (C) 2006 TOPTICA Photonics AG.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/of_address.h>
+#include <linux/of_platform.h>
+#include <linux/workqueue.h>
+#include <linux/completion.h>
+#include <linux/io.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/fsl_devices.h>
+#include <linux/slab.h>
+
+#include <asm/mpc52xx.h>
+#include <asm/mpc52xx_psc.h>
+
+#define MCLK 20000000 /* PSC port MClk in Hz */
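+/*
+ * With the default 512 MHz sysclk assumed in port_config below, the
+ * divider becomes 512000000 / 20000000 = 25 (integer division), so
+ * the PSC ends up clocked at about 512 MHz / 25 = 20.48 MHz rather
+ * than exactly MCLK.
+ */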
+
+struct mpc52xx_psc_spi {
+ /* fsl_spi_platform data */
+ void (*cs_control)(struct spi_device *spi, bool on);
+ u32 sysclk;
+
+ /* driver internal data */
+ struct mpc52xx_psc __iomem *psc;
+ struct mpc52xx_psc_fifo __iomem *fifo;
+ unsigned int irq;
+ u8 bits_per_word;
+ u8 busy;
+
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+
+ struct list_head queue;
+ spinlock_t lock;
+
+ struct completion done;
+};
+
+/* controller state */
+struct mpc52xx_psc_spi_cs {
+ int bits_per_word;
+ int speed_hz;
+};
+
+/* set clock freq, clock ramp, bits per word
+ * if t is NULL then reset the values to the default values
+ */
+static int mpc52xx_psc_spi_transfer_setup(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
+
+ cs->speed_hz = (t && t->speed_hz)
+ ? t->speed_hz : spi->max_speed_hz;
+ cs->bits_per_word = (t && t->bits_per_word)
+ ? t->bits_per_word : spi->bits_per_word;
+ cs->bits_per_word = ((cs->bits_per_word + 7) / 8) * 8;
+ return 0;
+}
+
+static void mpc52xx_psc_spi_activate_cs(struct spi_device *spi)
+{
+ struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ u32 sicr;
+ u16 ccr;
+
+ sicr = in_be32(&psc->sicr);
+
+ /* Set clock phase and polarity */
+ if (spi->mode & SPI_CPHA)
+ sicr |= 0x00001000;
+ else
+ sicr &= ~0x00001000;
+ if (spi->mode & SPI_CPOL)
+ sicr |= 0x00002000;
+ else
+ sicr &= ~0x00002000;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ sicr |= 0x10000000;
+ else
+ sicr &= ~0x10000000;
+ out_be32(&psc->sicr, sicr);
+
+ /* Set clock frequency and bits per word
+ * Because psc->ccr is defined as 16bit register instead of 32bit
+ * just set the lower byte of BitClkDiv
+ */
+ ccr = in_be16((u16 __iomem *)&psc->ccr);
+ ccr &= 0xFF00;
+ if (cs->speed_hz)
+ ccr |= (MCLK / cs->speed_hz - 1) & 0xFF;
+ else /* by default SPI Clk 1MHz */
+ ccr |= (MCLK / 1000000 - 1) & 0xFF;
+ out_be16((u16 __iomem *)&psc->ccr, ccr);
+ mps->bits_per_word = cs->bits_per_word;
+
+ if (mps->cs_control)
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 1 : 0);
+}
+
+static void mpc52xx_psc_spi_deactivate_cs(struct spi_device *spi)
+{
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
+
+ if (mps->cs_control)
+ mps->cs_control(spi, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+}
+
+#define MPC52xx_PSC_BUFSIZE (MPC52xx_PSC_RFNUM_MASK + 1)
+/* wake up when the FIFO is 80% full (alarm level = 20% of its size) */
+#define MPC52xx_PSC_RFALARM (MPC52xx_PSC_BUFSIZE * 20 / 100)
+
+static int mpc52xx_psc_spi_transfer_rxtx(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
+ unsigned rb = 0; /* number of bytes received */
+ unsigned sb = 0; /* number of bytes sent */
+ unsigned char *rx_buf = (unsigned char *)t->rx_buf;
+ unsigned char *tx_buf = (unsigned char *)t->tx_buf;
+ unsigned rfalarm;
+ unsigned send_at_once = MPC52xx_PSC_BUFSIZE;
+ unsigned recv_at_once;
+ int last_block = 0;
+
+ if (!t->tx_buf && !t->rx_buf && t->len)
+ return -EINVAL;
+
+ /* enable transmitter/receiver */
+ out_8(&psc->command, MPC52xx_PSC_TX_ENABLE | MPC52xx_PSC_RX_ENABLE);
+ while (rb < t->len) {
+ if (t->len - rb > MPC52xx_PSC_BUFSIZE) {
+ rfalarm = MPC52xx_PSC_RFALARM;
+ last_block = 0;
+ } else {
+ send_at_once = t->len - sb;
+ rfalarm = MPC52xx_PSC_BUFSIZE - (t->len - rb);
+ last_block = 1;
+ }
+
+ dev_dbg(&spi->dev, "send %d bytes...\n", send_at_once);
+ for (; send_at_once; sb++, send_at_once--) {
+ /* set EOF flag before the last word is sent */
+ if (send_at_once == 1 && last_block)
+ out_8(&psc->ircr2, 0x01);
+
+ if (tx_buf)
+ out_8(&psc->mpc52xx_psc_buffer_8, tx_buf[sb]);
+ else
+ out_8(&psc->mpc52xx_psc_buffer_8, 0);
+ }
+
+ /* enable interrupts and wait for wake up
+ * if just one byte is expected the Rx FIFO generates no
+ * FFULL interrupt, so activate the RxRDY interrupt
+ */
+ out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
+ if (t->len - rb == 1) {
+ out_8(&psc->mode, 0);
+ } else {
+ out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL);
+ out_be16(&fifo->rfalarm, rfalarm);
+ }
+ out_be16(&psc->mpc52xx_psc_imr, MPC52xx_PSC_IMR_RXRDY);
+ wait_for_completion(&mps->done);
+ recv_at_once = in_be16(&fifo->rfnum);
+ dev_dbg(&spi->dev, "%d bytes received\n", recv_at_once);
+
+ send_at_once = recv_at_once;
+ if (rx_buf) {
+ for (; recv_at_once; rb++, recv_at_once--)
+ rx_buf[rb] = in_8(&psc->mpc52xx_psc_buffer_8);
+ } else {
+ for (; recv_at_once; rb++, recv_at_once--)
+ in_8(&psc->mpc52xx_psc_buffer_8);
+ }
+ }
+ /* disable transmitter/receiver */
+ out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+
+ return 0;
+}
+
+static void mpc52xx_psc_spi_work(struct work_struct *work)
+{
+ struct mpc52xx_psc_spi *mps =
+ container_of(work, struct mpc52xx_psc_spi, work);
+
+ spin_lock_irq(&mps->lock);
+ mps->busy = 1;
+ while (!list_empty(&mps->queue)) {
+ struct spi_message *m;
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ unsigned cs_change;
+ int status;
+
+ m = container_of(mps->queue.next, struct spi_message, queue);
+ list_del_init(&m->queue);
+ spin_unlock_irq(&mps->lock);
+
+ spi = m->spi;
+ cs_change = 1;
+ status = 0;
+ list_for_each_entry (t, &m->transfers, transfer_list) {
+ if (t->bits_per_word || t->speed_hz) {
+ status = mpc52xx_psc_spi_transfer_setup(spi, t);
+ if (status < 0)
+ break;
+ }
+
+ if (cs_change)
+ mpc52xx_psc_spi_activate_cs(spi);
+ cs_change = t->cs_change;
+
+ status = mpc52xx_psc_spi_transfer_rxtx(spi, t);
+ if (status)
+ break;
+ m->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change)
+ mpc52xx_psc_spi_deactivate_cs(spi);
+ }
+
+ m->status = status;
+ if (m->complete)
+ m->complete(m->context);
+
+ if (status || !cs_change)
+ mpc52xx_psc_spi_deactivate_cs(spi);
+
+ mpc52xx_psc_spi_transfer_setup(spi, NULL);
+
+ spin_lock_irq(&mps->lock);
+ }
+ mps->busy = 0;
+ spin_unlock_irq(&mps->lock);
+}
+
+static int mpc52xx_psc_spi_setup(struct spi_device *spi)
+{
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
+ struct mpc52xx_psc_spi_cs *cs = spi->controller_state;
+ unsigned long flags;
+
+ if (spi->bits_per_word%8)
+ return -EINVAL;
+
+ if (!cs) {
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi->controller_state = cs;
+ }
+
+ cs->bits_per_word = spi->bits_per_word;
+ cs->speed_hz = spi->max_speed_hz;
+
+ spin_lock_irqsave(&mps->lock, flags);
+ if (!mps->busy)
+ mpc52xx_psc_spi_deactivate_cs(spi);
+ spin_unlock_irqrestore(&mps->lock, flags);
+
+ return 0;
+}
+
+static int mpc52xx_psc_spi_transfer(struct spi_device *spi,
+ struct spi_message *m)
+{
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ m->actual_length = 0;
+ m->status = -EINPROGRESS;
+
+ spin_lock_irqsave(&mps->lock, flags);
+ list_add_tail(&m->queue, &mps->queue);
+ queue_work(mps->workqueue, &mps->work);
+ spin_unlock_irqrestore(&mps->lock, flags);
+
+ return 0;
+}
+
+static void mpc52xx_psc_spi_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+
+static int mpc52xx_psc_spi_port_config(int psc_id, struct mpc52xx_psc_spi *mps)
+{
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+ struct mpc52xx_psc_fifo __iomem *fifo = mps->fifo;
+ u32 mclken_div;
+ int ret;
+
+ /* default sysclk is 512MHz */
+ mclken_div = (mps->sysclk ? mps->sysclk : 512000000) / MCLK;
+ ret = mpc52xx_set_psc_clkdiv(psc_id, mclken_div);
+ if (ret)
+ return ret;
+
+ /* Reset the PSC into a known state */
+ out_8(&psc->command, MPC52xx_PSC_RST_RX);
+ out_8(&psc->command, MPC52xx_PSC_RST_TX);
+ out_8(&psc->command, MPC52xx_PSC_TX_DISABLE | MPC52xx_PSC_RX_DISABLE);
+
+ /* Disable interrupts; interrupts are based on the alarm level */
+ out_be16(&psc->mpc52xx_psc_imr, 0);
+ out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
+ out_8(&fifo->rfcntl, 0);
+ out_8(&psc->mode, MPC52xx_PSC_MODE_FFULL);
+
+ /* Configure 8bit codec mode as a SPI master and use EOF flags */
+ /* SICR_SIM_CODEC8|SICR_GENCLK|SICR_SPI|SICR_MSTR|SICR_USEEOF */
+ out_be32(&psc->sicr, 0x0180C800);
+ out_be16((u16 __iomem *)&psc->ccr, 0x070F); /* default SPI Clk 1MHz */
+
+ /* Set 2ms DTL delay */
+ out_8(&psc->ctur, 0x00);
+ out_8(&psc->ctlr, 0x84);
+
+ mps->bits_per_word = 8;
+
+ return 0;
+}
+
+static irqreturn_t mpc52xx_psc_spi_isr(int irq, void *dev_id)
+{
+ struct mpc52xx_psc_spi *mps = (struct mpc52xx_psc_spi *)dev_id;
+ struct mpc52xx_psc __iomem *psc = mps->psc;
+
+ /* disable interrupt and wake up the work queue */
+ if (in_be16(&psc->mpc52xx_psc_isr) & MPC52xx_PSC_IMR_RXRDY) {
+ out_be16(&psc->mpc52xx_psc_imr, 0);
+ complete(&mps->done);
+ return IRQ_HANDLED;
+ }
+ return IRQ_NONE;
+}
+
+/* bus_num is used only for the case dev->platform_data == NULL */
+static int mpc52xx_psc_spi_do_probe(struct device *dev, u32 regaddr,
+ u32 size, unsigned int irq, s16 bus_num)
+{
+ struct fsl_spi_platform_data *pdata = dev_get_platdata(dev);
+ struct mpc52xx_psc_spi *mps;
+ struct spi_master *master;
+ int ret;
+
+ master = spi_alloc_master(dev, sizeof *mps);
+ if (master == NULL)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, master);
+ mps = spi_master_get_devdata(master);
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+
+ mps->irq = irq;
+ if (pdata == NULL) {
+ dev_warn(dev,
+ "probe called without platform data, no cs_control function will be called\n");
+ mps->cs_control = NULL;
+ mps->sysclk = 0;
+ master->bus_num = bus_num;
+ master->num_chipselect = 255;
+ } else {
+ mps->cs_control = pdata->cs_control;
+ mps->sysclk = pdata->sysclk;
+ master->bus_num = pdata->bus_num;
+ master->num_chipselect = pdata->max_chipselect;
+ }
+ master->setup = mpc52xx_psc_spi_setup;
+ master->transfer = mpc52xx_psc_spi_transfer;
+ master->cleanup = mpc52xx_psc_spi_cleanup;
+ master->dev.of_node = dev->of_node;
+
+ mps->psc = ioremap(regaddr, size);
+ if (!mps->psc) {
+ dev_err(dev, "could not ioremap I/O port range\n");
+ ret = -EFAULT;
+ goto free_master;
+ }
+ /* On the 5200, fifo regs are immediately adjacent to the psc regs */
+ mps->fifo = ((void __iomem *)mps->psc) + sizeof(struct mpc52xx_psc);
+
+ ret = request_irq(mps->irq, mpc52xx_psc_spi_isr, 0, "mpc52xx-psc-spi",
+ mps);
+ if (ret)
+ goto free_master;
+
+ ret = mpc52xx_psc_spi_port_config(master->bus_num, mps);
+ if (ret < 0) {
+ dev_err(dev, "can't configure PSC! Is it capable of SPI?\n");
+ goto free_irq;
+ }
+
+ spin_lock_init(&mps->lock);
+ init_completion(&mps->done);
+ INIT_WORK(&mps->work, mpc52xx_psc_spi_work);
+ INIT_LIST_HEAD(&mps->queue);
+
+ mps->workqueue = create_singlethread_workqueue(
+ dev_name(master->dev.parent));
+ if (mps->workqueue == NULL) {
+ ret = -EBUSY;
+ goto free_irq;
+ }
+
+ ret = spi_register_master(master);
+ if (ret < 0)
+ goto unreg_master;
+
+ return ret;
+
+unreg_master:
+ destroy_workqueue(mps->workqueue);
+free_irq:
+ free_irq(mps->irq, mps);
+free_master:
+ if (mps->psc)
+ iounmap(mps->psc);
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int mpc52xx_psc_spi_of_probe(struct platform_device *op)
+{
+ const u32 *regaddr_p;
+ u64 regaddr64, size64;
+ s16 id = -1;
+
+ regaddr_p = of_get_address(op->dev.of_node, 0, &size64, NULL);
+ if (!regaddr_p) {
+ dev_err(&op->dev, "Invalid PSC address\n");
+ return -EINVAL;
+ }
+ regaddr64 = of_translate_address(op->dev.of_node, regaddr_p);
+
+ /* get PSC id (1..6, used by port_config) */
+ if (op->dev.platform_data == NULL) {
+ const u32 *psc_nump;
+
+ psc_nump = of_get_property(op->dev.of_node, "cell-index", NULL);
+ if (!psc_nump || *psc_nump > 5) {
+ dev_err(&op->dev, "Invalid cell-index property\n");
+ return -EINVAL;
+ }
+ id = *psc_nump + 1;
+ }
+
+ return mpc52xx_psc_spi_do_probe(&op->dev, (u32)regaddr64, (u32)size64,
+ irq_of_parse_and_map(op->dev.of_node, 0), id);
+}
+
+static int mpc52xx_psc_spi_of_remove(struct platform_device *op)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(op));
+ struct mpc52xx_psc_spi *mps = spi_master_get_devdata(master);
+
+ flush_workqueue(mps->workqueue);
+ destroy_workqueue(mps->workqueue);
+ spi_unregister_master(master);
+ free_irq(mps->irq, mps);
+ if (mps->psc)
+ iounmap(mps->psc);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static const struct of_device_id mpc52xx_psc_spi_of_match[] = {
+ { .compatible = "fsl,mpc5200-psc-spi", },
+ { .compatible = "mpc5200-psc-spi", }, /* old */
+ {}
+};
+
+MODULE_DEVICE_TABLE(of, mpc52xx_psc_spi_of_match);
+
+static struct platform_driver mpc52xx_psc_spi_of_driver = {
+ .probe = mpc52xx_psc_spi_of_probe,
+ .remove = mpc52xx_psc_spi_of_remove,
+ .driver = {
+ .name = "mpc52xx-psc-spi",
+ .of_match_table = mpc52xx_psc_spi_of_match,
+ },
+};
+module_platform_driver(mpc52xx_psc_spi_of_driver);
+
+MODULE_AUTHOR("Dragos Carp");
+MODULE_DESCRIPTION("MPC52xx PSC SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-mpc52xx.c b/drivers/spi/spi-mpc52xx.c
new file mode 100644
index 000000000..c36002110
--- /dev/null
+++ b/drivers/spi/spi-mpc52xx.c
@@ -0,0 +1,551 @@
+/*
+ * MPC52xx SPI bus driver.
+ *
+ * Copyright (C) 2008 Secret Lab Technologies Ltd.
+ *
+ * This file is released under the GPLv2
+ *
+ * This is the driver for the MPC5200's dedicated SPI controller.
+ *
+ * Note: this driver does not support the MPC5200 PSC in SPI mode. For
+ * that, see drivers/spi/spi-mpc52xx-psc.c instead.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/of_platform.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/io.h>
+#include <linux/of_gpio.h>
+#include <linux/slab.h>
+#include <asm/time.h>
+#include <asm/mpc52xx.h>
+
+MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>");
+MODULE_DESCRIPTION("MPC52xx SPI (non-PSC) Driver");
+MODULE_LICENSE("GPL");
+
+/* Register offsets */
+#define SPI_CTRL1 0x00
+#define SPI_CTRL1_SPIE (1 << 7)
+#define SPI_CTRL1_SPE (1 << 6)
+#define SPI_CTRL1_MSTR (1 << 4)
+#define SPI_CTRL1_CPOL (1 << 3)
+#define SPI_CTRL1_CPHA (1 << 2)
+#define SPI_CTRL1_SSOE (1 << 1)
+#define SPI_CTRL1_LSBFE (1 << 0)
+
+#define SPI_CTRL2 0x01
+#define SPI_BRR 0x04
+
+#define SPI_STATUS 0x05
+#define SPI_STATUS_SPIF (1 << 7)
+#define SPI_STATUS_WCOL (1 << 6)
+#define SPI_STATUS_MODF (1 << 4)
+
+#define SPI_DATA 0x09
+#define SPI_PORTDATA 0x0d
+#define SPI_DATADIR 0x10
+
+/* FSM state return values */
+#define FSM_STOP 0 /* Nothing more for the state machine to */
+ /* do. If something interesting happens */
+ /* then an IRQ will be received */
+#define FSM_POLL 1 /* need to poll for completion, an IRQ is */
+ /* not expected */
+#define FSM_CONTINUE 2 /* Keep iterating the state machine */
+
+/* Driver internal data */
+struct mpc52xx_spi {
+ struct spi_master *master;
+ void __iomem *regs;
+ int irq0; /* MODF irq */
+ int irq1; /* SPIF irq */
+ unsigned int ipb_freq;
+
+ /* Statistics; not used now, but will be reintroduced for debugfs */
+ int msg_count;
+ int wcol_count;
+ int wcol_ticks;
+ u32 wcol_tx_timestamp;
+ int modf_count;
+ int byte_count;
+
+ struct list_head queue; /* queue of pending messages */
+ spinlock_t lock;
+ struct work_struct work;
+
+ /* Details of current transfer (length, and buffer pointers) */
+ struct spi_message *message; /* current message */
+ struct spi_transfer *transfer; /* current transfer */
+ int (*state)(int irq, struct mpc52xx_spi *ms, u8 status, u8 data);
+ int len;
+ int timestamp;
+ u8 *rx_buf;
+ const u8 *tx_buf;
+ int cs_change;
+ int gpio_cs_count;
+ unsigned int *gpio_cs;
+};
+
+/*
+ * CS control function
+ */
+static void mpc52xx_spi_chipsel(struct mpc52xx_spi *ms, int value)
+{
+ int cs;
+
+ if (ms->gpio_cs_count > 0) {
+ cs = ms->message->spi->chip_select;
+ gpio_set_value(ms->gpio_cs[cs], value ? 0 : 1);
+ } else
+ out_8(ms->regs + SPI_PORTDATA, value ? 0 : 0x08);
+}
+
+/*
+ * Start a new transfer. This is called both by the idle state
+ * for the first transfer in a message, and by the wait state when the
+ * previous transfer in a message is complete.
+ */
+static void mpc52xx_spi_start_transfer(struct mpc52xx_spi *ms)
+{
+ ms->rx_buf = ms->transfer->rx_buf;
+ ms->tx_buf = ms->transfer->tx_buf;
+ ms->len = ms->transfer->len;
+
+ /* Activate the chip select */
+ if (ms->cs_change)
+ mpc52xx_spi_chipsel(ms, 1);
+ ms->cs_change = ms->transfer->cs_change;
+
+ /* Write out the first byte */
+ ms->wcol_tx_timestamp = get_tbl();
+ if (ms->tx_buf)
+ out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
+ else
+ out_8(ms->regs + SPI_DATA, 0);
+}
+
+/* Forward declaration of state handlers */
+static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
+ u8 status, u8 data);
+static int mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms,
+ u8 status, u8 data);
+
+/*
+ * IDLE state
+ *
+ * No transfers are in progress; if another transfer is pending then retrieve
+ * it and kick it off. Otherwise, stop processing the state machine
+ */
+static int
+mpc52xx_spi_fsmstate_idle(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
+{
+ struct spi_device *spi;
+ int spr, sppr;
+ u8 ctrl1;
+
+ if (status && (irq != NO_IRQ))
+ dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
+ status);
+
+ /* Check if there is another transfer waiting. */
+ if (list_empty(&ms->queue))
+ return FSM_STOP;
+
+ /* get the head of the queue */
+ ms->message = list_first_entry(&ms->queue, struct spi_message, queue);
+ list_del_init(&ms->message->queue);
+
+ /* Setup the controller parameters */
+ ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
+ spi = ms->message->spi;
+ if (spi->mode & SPI_CPHA)
+ ctrl1 |= SPI_CTRL1_CPHA;
+ if (spi->mode & SPI_CPOL)
+ ctrl1 |= SPI_CTRL1_CPOL;
+ if (spi->mode & SPI_LSB_FIRST)
+ ctrl1 |= SPI_CTRL1_LSBFE;
+ out_8(ms->regs + SPI_CTRL1, ctrl1);
+
+ /* Setup the controller speed */
+ /* minimum divider is '2'. Also, add '1' to force rounding the
+ * divider up. */
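+ /*
+ * Worked example with a hypothetical 132 MHz IPB bus and a 1 MHz
+ * target: the initial sppr is (132 + 1) / 2 = 66; the loop below
+ * halves it to 33, 17, 9 and finally 5 while stepping spr up to
+ * 4, and the trailing decrement leaves sppr = 4, so BRR is
+ * written with 0x44.
+ */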
+ sppr = ((ms->ipb_freq / ms->message->spi->max_speed_hz) + 1) >> 1;
+ spr = 0;
+ if (sppr < 1)
+ sppr = 1;
+ while (((sppr - 1) & ~0x7) != 0) {
+ sppr = (sppr + 1) >> 1; /* add '1' to force rounding up */
+ spr++;
+ }
+ sppr--; /* sppr quantity in register is offset by 1 */
+ if (spr > 7) {
+ /* Don't overrun limits of SPI baudrate register */
+ spr = 7;
+ sppr = 7;
+ }
+ out_8(ms->regs + SPI_BRR, sppr << 4 | spr); /* Set speed */
+
+ ms->cs_change = 1;
+ ms->transfer = container_of(ms->message->transfers.next,
+ struct spi_transfer, transfer_list);
+
+ mpc52xx_spi_start_transfer(ms);
+ ms->state = mpc52xx_spi_fsmstate_transfer;
+
+ return FSM_CONTINUE;
+}
+
+/*
+ * TRANSFER state
+ *
+ * In the middle of a transfer. If the SPI core has completed processing
+ * a byte, then read out the received data and write out the next byte
+ * (unless this transfer is finished; in which case go on to the wait
+ * state)
+ */
+static int mpc52xx_spi_fsmstate_transfer(int irq, struct mpc52xx_spi *ms,
+ u8 status, u8 data)
+{
+ if (!status)
+ return ms->irq0 ? FSM_STOP : FSM_POLL;
+
+ if (status & SPI_STATUS_WCOL) {
+ /* The SPI controller is stoopid. At slower speeds, it may
+ * raise the SPIF flag before the state machine is actually
+ * finished, which causes a collision (internal to the state
+ * machine only). The manual recommends inserting a delay
+ * between receiving the interrupt and sending the next byte,
+ * but it can also be worked around simply by retrying the
+ * transfer which is what we do here. */
+ ms->wcol_count++;
+ ms->wcol_ticks += get_tbl() - ms->wcol_tx_timestamp;
+ ms->wcol_tx_timestamp = get_tbl();
+ data = 0;
+ if (ms->tx_buf)
+ data = *(ms->tx_buf - 1);
+ out_8(ms->regs + SPI_DATA, data); /* try again */
+ return FSM_CONTINUE;
+ } else if (status & SPI_STATUS_MODF) {
+ ms->modf_count++;
+ dev_err(&ms->master->dev, "mode fault\n");
+ mpc52xx_spi_chipsel(ms, 0);
+ ms->message->status = -EIO;
+ if (ms->message->complete)
+ ms->message->complete(ms->message->context);
+ ms->state = mpc52xx_spi_fsmstate_idle;
+ return FSM_CONTINUE;
+ }
+
+ /* Read data out of the spi device */
+ ms->byte_count++;
+ if (ms->rx_buf)
+ *ms->rx_buf++ = data;
+
+ /* Is the transfer complete? */
+ ms->len--;
+ if (ms->len == 0) {
+ ms->timestamp = get_tbl();
+ ms->timestamp += ms->transfer->delay_usecs * tb_ticks_per_usec;
+ ms->state = mpc52xx_spi_fsmstate_wait;
+ return FSM_CONTINUE;
+ }
+
+ /* Write out the next byte */
+ ms->wcol_tx_timestamp = get_tbl();
+ if (ms->tx_buf)
+ out_8(ms->regs + SPI_DATA, *ms->tx_buf++);
+ else
+ out_8(ms->regs + SPI_DATA, 0);
+
+ return FSM_CONTINUE;
+}
+
+/*
+ * WAIT state
+ *
+ * A transfer has completed; need to wait for the delay period to complete
+ * before starting the next transfer
+ */
+static int
+mpc52xx_spi_fsmstate_wait(int irq, struct mpc52xx_spi *ms, u8 status, u8 data)
+{
+ if (status && irq)
+ dev_err(&ms->master->dev, "spurious irq, status=0x%.2x\n",
+ status);
+
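+ /* the signed difference is safe across 32-bit timebase wrap-around */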
+ if (((int)get_tbl()) - ms->timestamp < 0)
+ return FSM_POLL;
+
+ ms->message->actual_length += ms->transfer->len;
+
+ /* Check if there is another transfer in this message. If there
+ * aren't then deactivate CS, notify sender, and drop back to idle
+ * to start the next message. */
+ if (ms->transfer->transfer_list.next == &ms->message->transfers) {
+ ms->msg_count++;
+ mpc52xx_spi_chipsel(ms, 0);
+ ms->message->status = 0;
+ if (ms->message->complete)
+ ms->message->complete(ms->message->context);
+ ms->state = mpc52xx_spi_fsmstate_idle;
+ return FSM_CONTINUE;
+ }
+
+ /* There is another transfer; kick it off */
+
+ if (ms->cs_change)
+ mpc52xx_spi_chipsel(ms, 0);
+
+ ms->transfer = container_of(ms->transfer->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ mpc52xx_spi_start_transfer(ms);
+ ms->state = mpc52xx_spi_fsmstate_transfer;
+ return FSM_CONTINUE;
+}
+
+/**
+ * mpc52xx_spi_fsm_process - Finite State Machine iteration function
+ * @irq: irq number that triggered the FSM or 0 for polling
+ * @ms: pointer to mpc52xx_spi driver data
+ */
+static void mpc52xx_spi_fsm_process(int irq, struct mpc52xx_spi *ms)
+{
+ int rc = FSM_CONTINUE;
+ u8 status, data;
+
+ while (rc == FSM_CONTINUE) {
+ /* Interrupt cleared by read of STATUS followed by
+ * read of DATA registers */
+ status = in_8(ms->regs + SPI_STATUS);
+ data = in_8(ms->regs + SPI_DATA);
+ rc = ms->state(irq, ms, status, data);
+ }
+
+ if (rc == FSM_POLL)
+ schedule_work(&ms->work);
+}
+
+/**
+ * mpc52xx_spi_irq - IRQ handler
+ */
+static irqreturn_t mpc52xx_spi_irq(int irq, void *_ms)
+{
+ struct mpc52xx_spi *ms = _ms;
+ spin_lock(&ms->lock);
+ mpc52xx_spi_fsm_process(irq, ms);
+ spin_unlock(&ms->lock);
+ return IRQ_HANDLED;
+}
+
+/**
+ * mpc52xx_spi_wq - Workqueue function for polling the state machine
+ */
+static void mpc52xx_spi_wq(struct work_struct *work)
+{
+ struct mpc52xx_spi *ms = container_of(work, struct mpc52xx_spi, work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&ms->lock, flags);
+ mpc52xx_spi_fsm_process(0, ms);
+ spin_unlock_irqrestore(&ms->lock, flags);
+}
+
+/*
+ * spi_master ops
+ */
+
+static int mpc52xx_spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct mpc52xx_spi *ms = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ m->actual_length = 0;
+ m->status = -EINPROGRESS;
+
+ spin_lock_irqsave(&ms->lock, flags);
+ list_add_tail(&m->queue, &ms->queue);
+ spin_unlock_irqrestore(&ms->lock, flags);
+ schedule_work(&ms->work);
+
+ return 0;
+}
+
+/*
+ * OF Platform Bus Binding
+ */
+static int mpc52xx_spi_probe(struct platform_device *op)
+{
+ struct spi_master *master;
+ struct mpc52xx_spi *ms;
+ void __iomem *regs;
+ u8 ctrl1;
+ int rc, i = 0;
+ int gpio_cs;
+
+ /* MMIO registers */
+ dev_dbg(&op->dev, "probing mpc5200 SPI device\n");
+ regs = of_iomap(op->dev.of_node, 0);
+ if (!regs)
+ return -ENODEV;
+
+ /* initialize the device */
+ ctrl1 = SPI_CTRL1_SPIE | SPI_CTRL1_SPE | SPI_CTRL1_MSTR;
+ out_8(regs + SPI_CTRL1, ctrl1);
+ out_8(regs + SPI_CTRL2, 0x0);
+ out_8(regs + SPI_DATADIR, 0xe); /* Set output pins */
+ out_8(regs + SPI_PORTDATA, 0x8); /* Deassert /SS signal */
+
+ /* Clear the status register and re-read it to check for a MODF
+ * failure. This driver cannot currently handle multiple masters
+ * on the SPI bus. This fault will also occur if the SPI signals
+ * are not connected to any pins (port_config setting) */
+ in_8(regs + SPI_STATUS);
+ out_8(regs + SPI_CTRL1, ctrl1);
+
+ in_8(regs + SPI_DATA);
+ if (in_8(regs + SPI_STATUS) & SPI_STATUS_MODF) {
+ dev_err(&op->dev, "mode fault; is port_config correct?\n");
+ rc = -EIO;
+ goto err_init;
+ }
+
+ dev_dbg(&op->dev, "allocating spi_master struct\n");
+ master = spi_alloc_master(&op->dev, sizeof *ms);
+ if (!master) {
+ rc = -ENOMEM;
+ goto err_alloc;
+ }
+
+ master->transfer = mpc52xx_spi_transfer;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = op->dev.of_node;
+
+ platform_set_drvdata(op, master);
+
+ ms = spi_master_get_devdata(master);
+ ms->master = master;
+ ms->regs = regs;
+ ms->irq0 = irq_of_parse_and_map(op->dev.of_node, 0);
+ ms->irq1 = irq_of_parse_and_map(op->dev.of_node, 1);
+ ms->state = mpc52xx_spi_fsmstate_idle;
+ ms->ipb_freq = mpc5xxx_get_bus_frequency(op->dev.of_node);
+ ms->gpio_cs_count = of_gpio_count(op->dev.of_node);
+ if (ms->gpio_cs_count > 0) {
+ master->num_chipselect = ms->gpio_cs_count;
+ ms->gpio_cs = kmalloc(ms->gpio_cs_count * sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!ms->gpio_cs) {
+ rc = -ENOMEM;
+ goto err_alloc_gpio;
+ }
+
+ for (i = 0; i < ms->gpio_cs_count; i++) {
+ gpio_cs = of_get_gpio(op->dev.of_node, i);
+ if (gpio_cs < 0) {
+ dev_err(&op->dev,
+ "could not parse the gpio field in the device tree\n");
+ rc = -ENODEV;
+ goto err_gpio;
+ }
+
+ rc = gpio_request(gpio_cs, dev_name(&op->dev));
+ if (rc) {
+ dev_err(&op->dev,
+ "can't request spi cs gpio #%d "
+ "on gpio line %d\n", i, gpio_cs);
+ goto err_gpio;
+ }
+
+ gpio_direction_output(gpio_cs, 1);
+ ms->gpio_cs[i] = gpio_cs;
+ }
+ }
+
+ spin_lock_init(&ms->lock);
+ INIT_LIST_HEAD(&ms->queue);
+ INIT_WORK(&ms->work, mpc52xx_spi_wq);
+
+ /* Decide if interrupts can be used */
+ if (ms->irq0 && ms->irq1) {
+ rc = request_irq(ms->irq0, mpc52xx_spi_irq, 0,
+ "mpc5200-spi-modf", ms);
+ rc |= request_irq(ms->irq1, mpc52xx_spi_irq, 0,
+ "mpc5200-spi-spif", ms);
+ if (rc) {
+ free_irq(ms->irq0, ms);
+ free_irq(ms->irq1, ms);
+ ms->irq0 = ms->irq1 = 0;
+ }
+ } else {
+ /* operate in polled mode */
+ ms->irq0 = ms->irq1 = 0;
+ }
+
+ if (!ms->irq0)
+ dev_info(&op->dev, "using polled mode\n");
+
+ dev_dbg(&op->dev, "registering spi_master struct\n");
+ rc = spi_register_master(master);
+ if (rc)
+ goto err_register;
+
+ dev_info(&ms->master->dev, "registered MPC5200 SPI bus\n");
+
+ return rc;
+
+ err_register:
+ dev_err(&ms->master->dev, "initialization failed\n");
+ err_gpio:
+ while (i-- > 0)
+ gpio_free(ms->gpio_cs[i]);
+
+ kfree(ms->gpio_cs);
+ err_alloc_gpio:
+ spi_master_put(master);
+ err_alloc:
+ err_init:
+ iounmap(regs);
+ return rc;
+}
+
+static int mpc52xx_spi_remove(struct platform_device *op)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(op));
+ struct mpc52xx_spi *ms = spi_master_get_devdata(master);
+ int i;
+
+ free_irq(ms->irq0, ms);
+ free_irq(ms->irq1, ms);
+
+ for (i = 0; i < ms->gpio_cs_count; i++)
+ gpio_free(ms->gpio_cs[i]);
+
+ kfree(ms->gpio_cs);
+ spi_unregister_master(master);
+ iounmap(ms->regs);
+ spi_master_put(master);
+
+ return 0;
+}
+
+static const struct of_device_id mpc52xx_spi_match[] = {
+ { .compatible = "fsl,mpc5200-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mpc52xx_spi_match);
+
+static struct platform_driver mpc52xx_spi_of_driver = {
+ .driver = {
+ .name = "mpc52xx-spi",
+ .of_match_table = mpc52xx_spi_match,
+ },
+ .probe = mpc52xx_spi_probe,
+ .remove = mpc52xx_spi_remove,
+};
+module_platform_driver(mpc52xx_spi_of_driver);
diff --git a/drivers/spi/spi-mxs.c b/drivers/spi/spi-mxs.c
new file mode 100644
index 000000000..5b0e9a3e8
--- /dev/null
+++ b/drivers/spi/spi-mxs.c
@@ -0,0 +1,583 @@
+/*
+ * Freescale MXS SPI master driver
+ *
+ * Copyright 2012 DENX Software Engineering, GmbH.
+ * Copyright 2012 Freescale Semiconductor, Inc.
+ * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
+ *
+ * Rework and transition to new API by:
+ * Marek Vasut <marex@denx.de>
+ *
+ * Based on previous attempt by:
+ * Fabio Estevam <fabio.estevam@freescale.com>
+ *
+ * Based on code from U-Boot bootloader by:
+ * Marek Vasut <marex@denx.de>
+ *
+ * Based on spi-stmp.c, which is:
+ * Author: Dmitry Pervushin <dimka@embeddedalley.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/highmem.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/completion.h>
+#include <linux/gpio.h>
+#include <linux/regulator/consumer.h>
+#include <linux/module.h>
+#include <linux/stmp_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/mxs-spi.h>
+
+#define DRIVER_NAME "mxs-spi"
+
+/* Use a 10 s timeout for very long transfers; it should suffice. */
+#define SSP_TIMEOUT 10000
+
+#define SG_MAXLEN 0xff00
+
+/*
+ * Flags for txrx functions. More efficient than using an argument register for
+ * each one.
+ */
+#define TXRX_WRITE (1<<0) /* This is a write */
+#define TXRX_DEASSERT_CS (1<<1) /* De-assert CS at end of txrx */
+
+struct mxs_spi {
+ struct mxs_ssp ssp;
+ struct completion c;
+ unsigned int sck; /* Rate requested (vs actual) */
+};
+
+static int mxs_spi_setup_transfer(struct spi_device *dev,
+ const struct spi_transfer *t)
+{
+ struct mxs_spi *spi = spi_master_get_devdata(dev->master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ const unsigned int hz = min(dev->max_speed_hz, t->speed_hz);
+
+ if (hz == 0) {
+ dev_err(&dev->dev, "SPI clock rate of zero not allowed\n");
+ return -EINVAL;
+ }
+
+ if (hz != spi->sck) {
+ mxs_ssp_set_clk_rate(ssp, hz);
+ /*
+ * Save requested rate, hz, rather than the actual rate,
+ * ssp->clk_rate. Otherwise we would set the rate every transfer
+ * when the actual rate is not quite the same as requested rate.
+ */
+ spi->sck = hz;
+ /*
+ * Perhaps we should return an error if the actual clock is
+ * nowhere close to what was requested?
+ */
+ }
+
+ writel(BM_SSP_CTRL0_LOCK_CS,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ writel(BF_SSP_CTRL1_SSP_MODE(BV_SSP_CTRL1_SSP_MODE__SPI) |
+ BF_SSP_CTRL1_WORD_LENGTH(BV_SSP_CTRL1_WORD_LENGTH__EIGHT_BITS) |
+ ((dev->mode & SPI_CPOL) ? BM_SSP_CTRL1_POLARITY : 0) |
+ ((dev->mode & SPI_CPHA) ? BM_SSP_CTRL1_PHASE : 0),
+ ssp->base + HW_SSP_CTRL1(ssp));
+
+ writel(0x0, ssp->base + HW_SSP_CMD0);
+ writel(0x0, ssp->base + HW_SSP_CMD1);
+
+ return 0;
+}
+
+static u32 mxs_spi_cs_to_reg(unsigned cs)
+{
+ u32 select = 0;
+
+ /*
+ * i.MX28 Datasheet: 17.10.1: HW_SSP_CTRL0
+ *
+ * The bits BM_SSP_CTRL0_WAIT_FOR_CMD and BM_SSP_CTRL0_WAIT_FOR_IRQ
+ * in HW_SSP_CTRL0 register do have multiple usage, please refer to
+ * the datasheet for further details. In SPI mode, they are used to
+ * toggle the chip-select lines (nCS pins).
+ */
+ if (cs & 1)
+ select |= BM_SSP_CTRL0_WAIT_FOR_CMD;
+ if (cs & 2)
+ select |= BM_SSP_CTRL0_WAIT_FOR_IRQ;
+
+ return select;
+}
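+
+/*
+ * For reference, the mapping above works out to:
+ *
+ *  cs 0 -> 0
+ *  cs 1 -> BM_SSP_CTRL0_WAIT_FOR_CMD
+ *  cs 2 -> BM_SSP_CTRL0_WAIT_FOR_IRQ
+ *  cs 3 -> BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ
+ */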
+
+static int mxs_ssp_wait(struct mxs_spi *spi, int offset, int mask, bool set)
+{
+ const unsigned long timeout = jiffies + msecs_to_jiffies(SSP_TIMEOUT);
+ struct mxs_ssp *ssp = &spi->ssp;
+ u32 reg;
+
+ do {
+ reg = readl_relaxed(ssp->base + offset);
+
+ if (!set)
+ reg = ~reg;
+
+ reg &= mask;
+
+ if (reg == mask)
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIMEDOUT;
+}
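+
+/*
+ * Typical use is polling a bit until it asserts or clears, e.g.
+ * mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0) blocks until the
+ * RUN bit drops, as the PIO path below does after each word.
+ */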
+
+static void mxs_ssp_dma_irq_callback(void *param)
+{
+ struct mxs_spi *spi = param;
+
+ complete(&spi->c);
+}
+
+static irqreturn_t mxs_ssp_irq_handler(int irq, void *dev_id)
+{
+ struct mxs_ssp *ssp = dev_id;
+
+ dev_err(ssp->dev, "%s[%i] CTRL1=%08x STATUS=%08x\n",
+ __func__, __LINE__,
+ readl(ssp->base + HW_SSP_CTRL1(ssp)),
+ readl(ssp->base + HW_SSP_STATUS(ssp)));
+ return IRQ_HANDLED;
+}
+
+static int mxs_spi_txrx_dma(struct mxs_spi *spi,
+ unsigned char *buf, int len,
+ unsigned int flags)
+{
+ struct mxs_ssp *ssp = &spi->ssp;
+ struct dma_async_tx_descriptor *desc = NULL;
+ const bool vmalloced_buf = is_vmalloc_addr(buf);
+ const int desc_len = vmalloced_buf ? PAGE_SIZE : SG_MAXLEN;
+ const int sgs = DIV_ROUND_UP(len, desc_len);
+ int sg_count;
+ int min, ret;
+ u32 ctrl0;
+ struct page *vm_page;
+ struct {
+ u32 pio[4];
+ struct scatterlist sg;
+ } *dma_xfer;
+
+ if (!len)
+ return -EINVAL;
+
+ dma_xfer = kcalloc(sgs, sizeof(*dma_xfer), GFP_KERNEL);
+ if (!dma_xfer)
+ return -ENOMEM;
+
+ reinit_completion(&spi->c);
+
+ /* Chip select was already programmed into CTRL0 */
+ ctrl0 = readl(ssp->base + HW_SSP_CTRL0);
+ ctrl0 &= ~(BM_SSP_CTRL0_XFER_COUNT | BM_SSP_CTRL0_IGNORE_CRC |
+ BM_SSP_CTRL0_READ);
+ ctrl0 |= BM_SSP_CTRL0_DATA_XFER;
+
+ if (!(flags & TXRX_WRITE))
+ ctrl0 |= BM_SSP_CTRL0_READ;
+
+ /* Queue the DMA data transfer. */
+ for (sg_count = 0; sg_count < sgs; sg_count++) {
+ /* Prepare the transfer descriptor. */
+ min = min(len, desc_len);
+
+ /*
+ * De-assert CS on last segment if flag is set (i.e., no more
+ * transfers will follow)
+ */
+ if ((sg_count + 1 == sgs) && (flags & TXRX_DEASSERT_CS))
+ ctrl0 |= BM_SSP_CTRL0_IGNORE_CRC;
+
+ if (ssp->devid == IMX23_SSP) {
+ ctrl0 &= ~BM_SSP_CTRL0_XFER_COUNT;
+ ctrl0 |= min;
+ }
+
+ dma_xfer[sg_count].pio[0] = ctrl0;
+ dma_xfer[sg_count].pio[3] = min;
+
+ if (vmalloced_buf) {
+ vm_page = vmalloc_to_page(buf);
+ if (!vm_page) {
+ ret = -ENOMEM;
+ goto err_vmalloc;
+ }
+
+ sg_init_table(&dma_xfer[sg_count].sg, 1);
+ sg_set_page(&dma_xfer[sg_count].sg, vm_page,
+ min, offset_in_page(buf));
+ } else {
+ sg_init_one(&dma_xfer[sg_count].sg, buf, min);
+ }
+
+ ret = dma_map_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
+ (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+
+ len -= min;
+ buf += min;
+
+ /* Queue the PIO register write transfer. */
+ desc = dmaengine_prep_slave_sg(ssp->dmach,
+ (struct scatterlist *)dma_xfer[sg_count].pio,
+ (ssp->devid == IMX23_SSP) ? 1 : 4,
+ DMA_TRANS_NONE,
+ sg_count ? DMA_PREP_INTERRUPT : 0);
+ if (!desc) {
+ dev_err(ssp->dev,
+ "Failed to get PIO reg. write descriptor.\n");
+ ret = -EINVAL;
+ goto err_mapped;
+ }
+
+ desc = dmaengine_prep_slave_sg(ssp->dmach,
+ &dma_xfer[sg_count].sg, 1,
+ (flags & TXRX_WRITE) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+
+ if (!desc) {
+ dev_err(ssp->dev,
+ "Failed to get DMA data write descriptor.\n");
+ ret = -EINVAL;
+ goto err_mapped;
+ }
+ }
+
+ /*
+ * The last descriptor must have this callback,
+ * to finish the DMA transaction.
+ */
+ desc->callback = mxs_ssp_dma_irq_callback;
+ desc->callback_param = spi;
+
+ /* Start the transfer. */
+ dmaengine_submit(desc);
+ dma_async_issue_pending(ssp->dmach);
+
+ if (!wait_for_completion_timeout(&spi->c,
+ msecs_to_jiffies(SSP_TIMEOUT))) {
+ dev_err(ssp->dev, "DMA transfer timeout\n");
+ ret = -ETIMEDOUT;
+ dmaengine_terminate_all(ssp->dmach);
+ goto err_vmalloc;
+ }
+
+ ret = 0;
+
+err_vmalloc:
+ while (--sg_count >= 0) {
+err_mapped:
+ dma_unmap_sg(ssp->dev, &dma_xfer[sg_count].sg, 1,
+ (flags & TXRX_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
+ }
+
+ kfree(dma_xfer);
+
+ return ret;
+}
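+
+/*
+ * Note on the descriptor layout above: each segment queues a pair of
+ * descriptors on the same channel, one DMA_TRANS_NONE descriptor that
+ * writes the PIO words (pio[0] holds CTRL0; on i.MX28 pio[3] carries the
+ * transfer length, while i.MX23 embeds the count in CTRL0 and sends a
+ * single PIO word) and one that moves the data scatterlist.
+ */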
+
+static int mxs_spi_txrx_pio(struct mxs_spi *spi,
+ unsigned char *buf, int len,
+ unsigned int flags)
+{
+ struct mxs_ssp *ssp = &spi->ssp;
+
+ writel(BM_SSP_CTRL0_IGNORE_CRC,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+
+ while (len--) {
+ if (len == 0 && (flags & TXRX_DEASSERT_CS))
+ writel(BM_SSP_CTRL0_IGNORE_CRC,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ if (ssp->devid == IMX23_SSP) {
+ writel(BM_SSP_CTRL0_XFER_COUNT,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(1,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+ } else {
+ writel(1, ssp->base + HW_SSP_XFER_SIZE);
+ }
+
+ if (flags & TXRX_WRITE)
+ writel(BM_SSP_CTRL0_READ,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ else
+ writel(BM_SSP_CTRL0_READ,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ writel(BM_SSP_CTRL0_RUN,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 1))
+ return -ETIMEDOUT;
+
+ if (flags & TXRX_WRITE)
+ writel(*buf, ssp->base + HW_SSP_DATA(ssp));
+
+ writel(BM_SSP_CTRL0_DATA_XFER,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ if (!(flags & TXRX_WRITE)) {
+ if (mxs_ssp_wait(spi, HW_SSP_STATUS(ssp),
+ BM_SSP_STATUS_FIFO_EMPTY, 0))
+ return -ETIMEDOUT;
+
+ *buf = (readl(ssp->base + HW_SSP_DATA(ssp)) & 0xff);
+ }
+
+ if (mxs_ssp_wait(spi, HW_SSP_CTRL0, BM_SSP_CTRL0_RUN, 0))
+ return -ETIMEDOUT;
+
+ buf++;
+ }
+
+ if (len <= 0)
+ return 0;
+
+ return -ETIMEDOUT;
+}
+
+static int mxs_spi_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct mxs_spi *spi = spi_master_get_devdata(master);
+ struct mxs_ssp *ssp = &spi->ssp;
+ struct spi_transfer *t;
+ unsigned int flag;
+ int status = 0;
+
+ /* Program the CS register bits here; they are used for all transfers. */
+ writel(BM_SSP_CTRL0_WAIT_FOR_CMD | BM_SSP_CTRL0_WAIT_FOR_IRQ,
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
+ writel(mxs_spi_cs_to_reg(m->spi->chip_select),
+ ssp->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+
+ status = mxs_spi_setup_transfer(m->spi, t);
+ if (status)
+ break;
+
+ /* De-assert on last transfer, inverted by cs_change flag */
+ flag = (&t->transfer_list == m->transfers.prev) ^ t->cs_change ?
+ TXRX_DEASSERT_CS : 0;
+
+ /*
+ * Small blocks can be transferred via PIO.
+ * Measured empirically:
+ *
+ * dd if=/dev/mtdblock0 of=/dev/null bs=1024k count=1
+ *
+ * DMA only: 2.164808 seconds, 473.0KB/s
+ * Combined: 1.676276 seconds, 610.9KB/s
+ */
+ if (t->len < 32) {
+ writel(BM_SSP_CTRL1_DMA_ENABLE,
+ ssp->base + HW_SSP_CTRL1(ssp) +
+ STMP_OFFSET_REG_CLR);
+
+ if (t->tx_buf)
+ status = mxs_spi_txrx_pio(spi,
+ (void *)t->tx_buf,
+ t->len, flag | TXRX_WRITE);
+ if (t->rx_buf)
+ status = mxs_spi_txrx_pio(spi,
+ t->rx_buf, t->len,
+ flag);
+ } else {
+ writel(BM_SSP_CTRL1_DMA_ENABLE,
+ ssp->base + HW_SSP_CTRL1(ssp) +
+ STMP_OFFSET_REG_SET);
+
+ if (t->tx_buf)
+ status = mxs_spi_txrx_dma(spi,
+ (void *)t->tx_buf, t->len,
+ flag | TXRX_WRITE);
+ if (t->rx_buf)
+ status = mxs_spi_txrx_dma(spi,
+ t->rx_buf, t->len,
+ flag);
+ }
+
+ if (status) {
+ stmp_reset_block(ssp->base);
+ break;
+ }
+
+ m->actual_length += t->len;
+ }
+
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ return status;
+}
+
+static const struct of_device_id mxs_spi_dt_ids[] = {
+ { .compatible = "fsl,imx23-spi", .data = (void *) IMX23_SSP, },
+ { .compatible = "fsl,imx28-spi", .data = (void *) IMX28_SSP, },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, mxs_spi_dt_ids);
+
+static int mxs_spi_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id =
+ of_match_device(mxs_spi_dt_ids, &pdev->dev);
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_master *master;
+ struct mxs_spi *spi;
+ struct mxs_ssp *ssp;
+ struct resource *iores;
+ struct clk *clk;
+ void __iomem *base;
+ int devid, clk_freq;
+ int ret = 0, irq_err;
+
+ /*
+ * Default clock speed for the SPI core. 160MHz seems to
+ * work reasonably well with most SPI flashes, so use this
+ * as a default. Override with "clock-frequency" DT prop.
+ */
+ const int clk_freq_default = 160000000;
+
+ iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ irq_err = platform_get_irq(pdev, 0);
+ if (irq_err < 0)
+ return irq_err;
+
+ base = devm_ioremap_resource(&pdev->dev, iores);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
+
+ devid = (enum mxs_ssp_id) of_id->data;
+ ret = of_property_read_u32(np, "clock-frequency",
+ &clk_freq);
+ if (ret)
+ clk_freq = clk_freq_default;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi));
+ if (!master)
+ return -ENOMEM;
+
+ master->transfer_one_message = mxs_spi_transfer_one;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->num_chipselect = 3;
+ master->dev.of_node = np;
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+
+ spi = spi_master_get_devdata(master);
+ ssp = &spi->ssp;
+ ssp->dev = &pdev->dev;
+ ssp->clk = clk;
+ ssp->base = base;
+ ssp->devid = devid;
+
+ init_completion(&spi->c);
+
+ ret = devm_request_irq(&pdev->dev, irq_err, mxs_ssp_irq_handler, 0,
+ dev_name(&pdev->dev), ssp);
+ if (ret)
+ goto out_master_free;
+
+ ssp->dmach = dma_request_slave_channel(&pdev->dev, "rx-tx");
+ if (!ssp->dmach) {
+ dev_err(ssp->dev, "Failed to request DMA\n");
+ ret = -ENODEV;
+ goto out_master_free;
+ }
+
+ ret = clk_prepare_enable(ssp->clk);
+ if (ret)
+ goto out_dma_release;
+
+ clk_set_rate(ssp->clk, clk_freq);
+
+ ret = stmp_reset_block(ssp->base);
+ if (ret)
+ goto out_disable_clk;
+
+ platform_set_drvdata(pdev, master);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot register SPI master, %d\n", ret);
+ goto out_disable_clk;
+ }
+
+ return 0;
+
+out_disable_clk:
+ clk_disable_unprepare(ssp->clk);
+out_dma_release:
+ dma_release_channel(ssp->dmach);
+out_master_free:
+ spi_master_put(master);
+ return ret;
+}
+
+static int mxs_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct mxs_spi *spi;
+ struct mxs_ssp *ssp;
+
+ master = platform_get_drvdata(pdev);
+ spi = spi_master_get_devdata(master);
+ ssp = &spi->ssp;
+
+ clk_disable_unprepare(ssp->clk);
+ dma_release_channel(ssp->dmach);
+
+ return 0;
+}
+
+static struct platform_driver mxs_spi_driver = {
+ .probe = mxs_spi_probe,
+ .remove = mxs_spi_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = mxs_spi_dt_ids,
+ },
+};
+
+module_platform_driver(mxs_spi_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("MXS SPI master driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:mxs-spi");
diff --git a/drivers/spi/spi-nuc900.c b/drivers/spi/spi-nuc900.c
new file mode 100644
index 000000000..f51a058e7
--- /dev/null
+++ b/drivers/spi/spi-nuc900.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2009 Nuvoton technology.
+ * Wan ZongShun <mcuos.com@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <linux/platform_data/spi-nuc900.h>
+
+/* usi registers offset */
+#define USI_CNT 0x00
+#define USI_DIV 0x04
+#define USI_SSR 0x08
+#define USI_RX0 0x10
+#define USI_TX0 0x10
+
+/* usi register bit */
+#define ENINT (0x01 << 17)
+#define ENFLG (0x01 << 16)
+#define SLEEP (0x0f << 12)
+#define TXNUM (0x03 << 8)
+#define TXBITLEN (0x1f << 3)
+#define TXNEG (0x01 << 2)
+#define RXNEG (0x01 << 1)
+#define LSB (0x01 << 10)
+#define SELECTLEV (0x01 << 2)
+#define SELECTPOL (0x01 << 31)
+#define SELECTSLAVE 0x01
+#define GOBUSY 0x01
+
+struct nuc900_spi {
+ struct spi_bitbang bitbang;
+ struct completion done;
+ void __iomem *regs;
+ int irq;
+ int len;
+ int count;
+ const unsigned char *tx;
+ unsigned char *rx;
+ struct clk *clk;
+ struct spi_master *master;
+ struct nuc900_spi_info *pdata;
+ spinlock_t lock;
+};
+
+static inline struct nuc900_spi *to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static void nuc900_slave_select(struct spi_device *spi, unsigned int ssr)
+{
+ struct nuc900_spi *hw = to_hw(spi);
+ unsigned int val;
+ unsigned int cs = spi->mode & SPI_CS_HIGH ? 1 : 0;
+ unsigned int cpol = spi->mode & SPI_CPOL ? 1 : 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_SSR);
+
+ if (!cs)
+ val &= ~SELECTLEV;
+ else
+ val |= SELECTLEV;
+
+ if (!ssr)
+ val &= ~SELECTSLAVE;
+ else
+ val |= SELECTSLAVE;
+
+ __raw_writel(val, hw->regs + USI_SSR);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (!cpol)
+ val &= ~SELECTPOL;
+ else
+ val |= SELECTPOL;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_spi_chipsel(struct spi_device *spi, int value)
+{
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ nuc900_slave_select(spi, 0);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ nuc900_slave_select(spi, 1);
+ break;
+ }
+}
+
+static void nuc900_spi_setup_txnum(struct nuc900_spi *hw, unsigned int txnum)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT) & ~TXNUM;
+
+ if (txnum)
+ val |= txnum << 0x08;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+
+}
+
+static void nuc900_spi_setup_txbitlen(struct nuc900_spi *hw,
+ unsigned int txbitlen)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT) & ~TXBITLEN;
+
+ val |= (txbitlen << 0x03);
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_spi_gobusy(struct nuc900_spi *hw)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ val |= GOBUSY;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static inline unsigned int hw_txbyte(struct nuc900_spi *hw, int count)
+{
+ return hw->tx ? hw->tx[count] : 0;
+}
+
+static int nuc900_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct nuc900_spi *hw = to_hw(spi);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->count = 0;
+
+ __raw_writel(hw_txbyte(hw, 0x0), hw->regs + USI_TX0);
+
+ nuc900_spi_gobusy(hw);
+
+ wait_for_completion(&hw->done);
+
+ return hw->count;
+}
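+
+/*
+ * The transfer itself is interrupt driven: nuc900_spi_irq() below stores
+ * each received byte, queues the next tx byte and re-arms GOBUSY until
+ * hw->len bytes have moved, then signals hw->done.
+ */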
+
+static irqreturn_t nuc900_spi_irq(int irq, void *dev)
+{
+ struct nuc900_spi *hw = dev;
+ unsigned int status;
+ unsigned int count = hw->count;
+
+ status = __raw_readl(hw->regs + USI_CNT);
+ __raw_writel(status, hw->regs + USI_CNT);
+
+ if (status & ENFLG) {
+ hw->count++;
+
+ if (hw->rx)
+ hw->rx[count] = __raw_readl(hw->regs + USI_RX0);
+ count++;
+
+ if (count < hw->len) {
+ __raw_writel(hw_txbyte(hw, count), hw->regs + USI_TX0);
+ nuc900_spi_gobusy(hw);
+ } else {
+ complete(&hw->done);
+ }
+
+ return IRQ_HANDLED;
+ }
+
+ complete(&hw->done);
+ return IRQ_HANDLED;
+}
+
+static void nuc900_tx_edge(struct nuc900_spi *hw, unsigned int edge)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (edge)
+ val |= TXNEG;
+ else
+ val &= ~TXNEG;
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_rx_edge(struct nuc900_spi *hw, unsigned int edge)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (edge)
+ val |= RXNEG;
+ else
+ val &= ~RXNEG;
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_send_first(struct nuc900_spi *hw, unsigned int lsb)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ if (lsb)
+ val |= LSB;
+ else
+ val &= ~LSB;
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_set_sleep(struct nuc900_spi *hw, unsigned int sleep)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT) & ~SLEEP;
+
+ if (sleep)
+ val |= (sleep << 12);
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_enable_int(struct nuc900_spi *hw)
+{
+ unsigned int val;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hw->lock, flags);
+
+ val = __raw_readl(hw->regs + USI_CNT);
+
+ val |= ENINT;
+
+ __raw_writel(val, hw->regs + USI_CNT);
+
+ spin_unlock_irqrestore(&hw->lock, flags);
+}
+
+static void nuc900_set_divider(struct nuc900_spi *hw)
+{
+ __raw_writel(hw->pdata->divider, hw->regs + USI_DIV);
+}
+
+static void nuc900_init_spi(struct nuc900_spi *hw)
+{
+ clk_enable(hw->clk);
+ spin_lock_init(&hw->lock);
+
+ nuc900_tx_edge(hw, hw->pdata->txneg);
+ nuc900_rx_edge(hw, hw->pdata->rxneg);
+ nuc900_send_first(hw, hw->pdata->lsb);
+ nuc900_set_sleep(hw, hw->pdata->sleep);
+ nuc900_spi_setup_txbitlen(hw, hw->pdata->txbitlen);
+ nuc900_spi_setup_txnum(hw, hw->pdata->txnum);
+ nuc900_set_divider(hw);
+ nuc900_enable_int(hw);
+}
+
+static int nuc900_spi_probe(struct platform_device *pdev)
+{
+ struct nuc900_spi *hw;
+ struct spi_master *master;
+ struct resource *res;
+ int err = 0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct nuc900_spi));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "No memory for spi_master\n");
+ return -ENOMEM;
+ }
+
+ hw = spi_master_get_devdata(master);
+ hw->master = master;
+ hw->pdata = dev_get_platdata(&pdev->dev);
+
+ if (hw->pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ err = -ENOENT;
+ goto err_pdata;
+ }
+
+ platform_set_drvdata(pdev, hw);
+ init_completion(&hw->done);
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ if (hw->pdata->lsb)
+ master->mode_bits |= SPI_LSB_FIRST;
+ master->num_chipselect = hw->pdata->num_cs;
+ master->bus_num = hw->pdata->bus_num;
+ hw->bitbang.master = hw->master;
+ hw->bitbang.chipselect = nuc900_spi_chipsel;
+ hw->bitbang.txrx_bufs = nuc900_spi_txrx;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->regs)) {
+ err = PTR_ERR(hw->regs);
+ goto err_pdata;
+ }
+
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ specified\n");
+ err = -ENOENT;
+ goto err_pdata;
+ }
+
+ err = devm_request_irq(&pdev->dev, hw->irq, nuc900_spi_irq, 0,
+ pdev->name, hw);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ goto err_pdata;
+ }
+
+ hw->clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(hw->clk)) {
+ dev_err(&pdev->dev, "No clock for device\n");
+ err = PTR_ERR(hw->clk);
+ goto err_pdata;
+ }
+
+ mfp_set_groupg(&pdev->dev, NULL);
+ nuc900_init_spi(hw);
+
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register SPI master\n");
+ goto err_register;
+ }
+
+ return 0;
+
+err_register:
+ clk_disable(hw->clk);
+err_pdata:
+ spi_master_put(hw->master);
+ return err;
+}
+
+static int nuc900_spi_remove(struct platform_device *dev)
+{
+ struct nuc900_spi *hw = platform_get_drvdata(dev);
+
+ spi_bitbang_stop(&hw->bitbang);
+ clk_disable(hw->clk);
+ spi_master_put(hw->master);
+ return 0;
+}
+
+static struct platform_driver nuc900_spi_driver = {
+ .probe = nuc900_spi_probe,
+ .remove = nuc900_spi_remove,
+ .driver = {
+ .name = "nuc900-spi",
+ },
+};
+module_platform_driver(nuc900_spi_driver);
+
+MODULE_AUTHOR("Wan ZongShun <mcuos.com@gmail.com>");
+MODULE_DESCRIPTION("nuc900 spi driver!");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:nuc900-spi");
diff --git a/drivers/spi/spi-oc-tiny.c b/drivers/spi/spi-oc-tiny.c
new file mode 100644
index 000000000..76656a77e
--- /dev/null
+++ b/drivers/spi/spi-oc-tiny.c
@@ -0,0 +1,363 @@
+/*
+ * OpenCores tiny SPI master driver
+ *
+ * http://opencores.org/project,tiny_spi
+ *
+ * Copyright (C) 2011 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * Based on spi_s3c24xx.c, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/spi_oc_tiny.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+
+#define DRV_NAME "spi_oc_tiny"
+
+#define TINY_SPI_RXDATA 0
+#define TINY_SPI_TXDATA 4
+#define TINY_SPI_STATUS 8
+#define TINY_SPI_CONTROL 12
+#define TINY_SPI_BAUD 16
+
+#define TINY_SPI_STATUS_TXE 0x1
+#define TINY_SPI_STATUS_TXR 0x2
+
+struct tiny_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *base;
+ int irq;
+ unsigned int freq;
+ unsigned int baudwidth;
+ unsigned int baud;
+ unsigned int speed_hz;
+ unsigned int mode;
+ unsigned int len;
+ unsigned int txc, rxc;
+ const u8 *txp;
+ u8 *rxp;
+ int gpio_cs_count;
+ int *gpio_cs;
+};
+
+static inline struct tiny_spi *tiny_spi_to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static unsigned int tiny_spi_baud(struct spi_device *spi, unsigned int hz)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+
+ return min(DIV_ROUND_UP(hw->freq, hz * 2), (1U << hw->baudwidth)) - 1;
+}
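+
+/*
+ * Worked example (illustrative numbers): with hw->freq = 50 MHz,
+ * hw->baudwidth = 8 and hz = 10 MHz, DIV_ROUND_UP(50 MHz, 2 * 10 MHz)
+ * is 3, clamped to at most 1 << 8, so the BAUD register gets 3 - 1 = 2.
+ */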
+
+static void tiny_spi_chipselect(struct spi_device *spi, int is_active)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+
+ if (hw->gpio_cs_count > 0) {
+ gpio_set_value(hw->gpio_cs[spi->chip_select],
+ (spi->mode & SPI_CS_HIGH) ? is_active : !is_active);
+ }
+}
+
+static int tiny_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+ unsigned int baud = hw->baud;
+
+ if (t) {
+ if (t->speed_hz && t->speed_hz != hw->speed_hz)
+ baud = tiny_spi_baud(spi, t->speed_hz);
+ }
+ writel(baud, hw->base + TINY_SPI_BAUD);
+ writel(hw->mode, hw->base + TINY_SPI_CONTROL);
+ return 0;
+}
+
+static int tiny_spi_setup(struct spi_device *spi)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+
+ if (spi->max_speed_hz != hw->speed_hz) {
+ hw->speed_hz = spi->max_speed_hz;
+ hw->baud = tiny_spi_baud(spi, hw->speed_hz);
+ }
+ hw->mode = spi->mode & (SPI_CPOL | SPI_CPHA);
+ return 0;
+}
+
+static inline void tiny_spi_wait_txr(struct tiny_spi *hw)
+{
+ while (!(readb(hw->base + TINY_SPI_STATUS) &
+ TINY_SPI_STATUS_TXR))
+ cpu_relax();
+}
+
+static inline void tiny_spi_wait_txe(struct tiny_spi *hw)
+{
+ while (!(readb(hw->base + TINY_SPI_STATUS) &
+ TINY_SPI_STATUS_TXE))
+ cpu_relax();
+}
+
+static int tiny_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct tiny_spi *hw = tiny_spi_to_hw(spi);
+ const u8 *txp = t->tx_buf;
+ u8 *rxp = t->rx_buf;
+ unsigned int i;
+
+ if (hw->irq >= 0) {
+ /* use interrupt driven data transfer */
+ hw->len = t->len;
+ hw->txp = t->tx_buf;
+ hw->rxp = t->rx_buf;
+ hw->txc = 0;
+ hw->rxc = 0;
+
+ /* prime the transmitter: queue up to two bytes to keep it busy */
+ if (t->len > 1) {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXR, hw->base + TINY_SPI_STATUS);
+ } else {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXE, hw->base + TINY_SPI_STATUS);
+ }
+
+ wait_for_completion(&hw->done);
+ } else {
+ /* no interrupt available: run a tight polled transfer loop */
+ writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
+ for (i = 1; i < t->len; i++) {
+ writeb(txp ? *txp++ : 0, hw->base + TINY_SPI_TXDATA);
+
+ if (rxp || (i != t->len - 1))
+ tiny_spi_wait_txr(hw);
+ if (rxp)
+ *rxp++ = readb(hw->base + TINY_SPI_TXDATA);
+ }
+ tiny_spi_wait_txe(hw);
+ if (rxp)
+ *rxp++ = readb(hw->base + TINY_SPI_RXDATA);
+ }
+
+ return t->len;
+}
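+
+/*
+ * In the interrupt path above, two bytes are written up front when
+ * possible so transmission can continue back-to-back; tiny_spi_irq()
+ * below then refills one byte per TXR interrupt.
+ */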
+
+static irqreturn_t tiny_spi_irq(int irq, void *dev)
+{
+ struct tiny_spi *hw = dev;
+
+ writeb(0, hw->base + TINY_SPI_STATUS);
+ if (hw->rxc + 1 == hw->len) {
+ if (hw->rxp)
+ *hw->rxp++ = readb(hw->base + TINY_SPI_RXDATA);
+ hw->rxc++;
+ complete(&hw->done);
+ } else {
+ if (hw->rxp)
+ *hw->rxp++ = readb(hw->base + TINY_SPI_TXDATA);
+ hw->rxc++;
+ if (hw->txc < hw->len) {
+ writeb(hw->txp ? *hw->txp++ : 0,
+ hw->base + TINY_SPI_TXDATA);
+ hw->txc++;
+ writeb(TINY_SPI_STATUS_TXR,
+ hw->base + TINY_SPI_STATUS);
+ } else {
+ writeb(TINY_SPI_STATUS_TXE,
+ hw->base + TINY_SPI_STATUS);
+ }
+ }
+ return IRQ_HANDLED;
+}
+
+#ifdef CONFIG_OF
+#include <linux/of_gpio.h>
+
+static int tiny_spi_of_probe(struct platform_device *pdev)
+{
+ struct tiny_spi *hw = platform_get_drvdata(pdev);
+ struct device_node *np = pdev->dev.of_node;
+ unsigned int i;
+ const __be32 *val;
+ int len;
+
+ if (!np)
+ return 0;
+ hw->gpio_cs_count = of_gpio_count(np);
+ if (hw->gpio_cs_count > 0) {
+ hw->gpio_cs = devm_kzalloc(&pdev->dev,
+ hw->gpio_cs_count * sizeof(unsigned int),
+ GFP_KERNEL);
+ if (!hw->gpio_cs)
+ return -ENOMEM;
+ }
+ for (i = 0; i < hw->gpio_cs_count; i++) {
+ hw->gpio_cs[i] = of_get_gpio_flags(np, i, NULL);
+ if (hw->gpio_cs[i] < 0)
+ return -ENODEV;
+ }
+ hw->bitbang.master->dev.of_node = pdev->dev.of_node;
+ val = of_get_property(pdev->dev.of_node,
+ "clock-frequency", &len);
+ if (val && len >= sizeof(__be32))
+ hw->freq = be32_to_cpup(val);
+ val = of_get_property(pdev->dev.of_node, "baud-width", &len);
+ if (val && len >= sizeof(__be32))
+ hw->baudwidth = be32_to_cpup(val);
+ return 0;
+}
+#else /* !CONFIG_OF */
+static int tiny_spi_of_probe(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif /* CONFIG_OF */
+
+static int tiny_spi_probe(struct platform_device *pdev)
+{
+ struct tiny_spi_platform_data *platp = dev_get_platdata(&pdev->dev);
+ struct tiny_spi *hw;
+ struct spi_master *master;
+ struct resource *res;
+ unsigned int i;
+ int err = -ENODEV;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct tiny_spi));
+ if (!master)
+ return err;
+
+ /* setup the master state. */
+ master->bus_num = pdev->id;
+ master->num_chipselect = 255;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tiny_spi_setup;
+
+ hw = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, hw);
+
+ /* setup the state for the bitbang driver */
+ hw->bitbang.master = master;
+ hw->bitbang.setup_transfer = tiny_spi_setup_transfer;
+ hw->bitbang.chipselect = tiny_spi_chipselect;
+ hw->bitbang.txrx_bufs = tiny_spi_txrx_bufs;
+
+ /* find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->base)) {
+ err = PTR_ERR(hw->base);
+ goto exit;
+ }
+ /* irq is optional */
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq >= 0) {
+ init_completion(&hw->done);
+ err = devm_request_irq(&pdev->dev, hw->irq, tiny_spi_irq, 0,
+ pdev->name, hw);
+ if (err)
+ goto exit;
+ }
+ /* find platform data */
+ if (platp) {
+ hw->gpio_cs_count = platp->gpio_cs_count;
+ hw->gpio_cs = platp->gpio_cs;
+ if (platp->gpio_cs_count && !platp->gpio_cs) {
+ err = -EBUSY;
+ goto exit;
+ }
+ hw->freq = platp->freq;
+ hw->baudwidth = platp->baudwidth;
+ } else {
+ err = tiny_spi_of_probe(pdev);
+ if (err)
+ goto exit;
+ }
+ for (i = 0; i < hw->gpio_cs_count; i++) {
+ err = gpio_request(hw->gpio_cs[i], dev_name(&pdev->dev));
+ if (err)
+ goto exit_gpio;
+ gpio_direction_output(hw->gpio_cs[i], 1);
+ }
+ hw->bitbang.master->num_chipselect = max(1, hw->gpio_cs_count);
+
+ /* register our spi controller */
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err)
+ goto exit;
+ dev_info(&pdev->dev, "base %p, irq %d\n", hw->base, hw->irq);
+
+ return 0;
+
+exit_gpio:
+ while (i-- > 0)
+ gpio_free(hw->gpio_cs[i]);
+exit:
+ spi_master_put(master);
+ return err;
+}
+
+static int tiny_spi_remove(struct platform_device *pdev)
+{
+ struct tiny_spi *hw = platform_get_drvdata(pdev);
+ struct spi_master *master = hw->bitbang.master;
+ unsigned int i;
+
+ spi_bitbang_stop(&hw->bitbang);
+ for (i = 0; i < hw->gpio_cs_count; i++)
+ gpio_free(hw->gpio_cs[i]);
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_OF
+static const struct of_device_id tiny_spi_match[] = {
+ { .compatible = "opencores,tiny-spi-rtlsvn2", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, tiny_spi_match);
+#endif /* CONFIG_OF */
+
+static struct platform_driver tiny_spi_driver = {
+ .probe = tiny_spi_probe,
+ .remove = tiny_spi_remove,
+ .driver = {
+ .name = DRV_NAME,
+ .pm = NULL,
+ .of_match_table = of_match_ptr(tiny_spi_match),
+ },
+};
+module_platform_driver(tiny_spi_driver);
+
+MODULE_DESCRIPTION("OpenCores tiny SPI driver");
+MODULE_AUTHOR("Thomas Chou <thomas@wytron.com.tw>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/spi/spi-octeon.c b/drivers/spi/spi-octeon.c
new file mode 100644
index 000000000..e99d6a93d
--- /dev/null
+++ b/drivers/spi/spi-octeon.c
@@ -0,0 +1,260 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011, 2012 Cavium, Inc.
+ */
+
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/of.h>
+
+#include <asm/octeon/octeon.h>
+#include <asm/octeon/cvmx-mpi-defs.h>
+
+#define OCTEON_SPI_CFG 0
+#define OCTEON_SPI_STS 0x08
+#define OCTEON_SPI_TX 0x10
+#define OCTEON_SPI_DAT0 0x80
+
+#define OCTEON_SPI_MAX_BYTES 9
+
+#define OCTEON_SPI_MAX_CLOCK_HZ 16000000
+
+struct octeon_spi {
+ u64 register_base;
+ u64 last_cfg;
+ u64 cs_enax;
+};
+
+static void octeon_spi_wait_ready(struct octeon_spi *p)
+{
+ union cvmx_mpi_sts mpi_sts;
+ unsigned int loops = 0;
+
+ do {
+ if (loops++)
+ __delay(500);
+ mpi_sts.u64 = cvmx_read_csr(p->register_base + OCTEON_SPI_STS);
+ } while (mpi_sts.s.busy);
+}
+
+static int octeon_spi_do_transfer(struct octeon_spi *p,
+ struct spi_message *msg,
+ struct spi_transfer *xfer,
+ bool last_xfer)
+{
+ struct spi_device *spi = msg->spi;
+ union cvmx_mpi_cfg mpi_cfg;
+ union cvmx_mpi_tx mpi_tx;
+ unsigned int clkdiv;
+ unsigned int speed_hz;
+ int mode;
+ bool cpha, cpol;
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+ int i;
+
+ mode = spi->mode;
+ cpha = mode & SPI_CPHA;
+ cpol = mode & SPI_CPOL;
+
+ speed_hz = xfer->speed_hz ? : spi->max_speed_hz;
+
+ clkdiv = octeon_get_io_clock_rate() / (2 * speed_hz);
+
+ mpi_cfg.u64 = 0;
+
+ mpi_cfg.s.clkdiv = clkdiv;
+ mpi_cfg.s.cshi = (mode & SPI_CS_HIGH) ? 1 : 0;
+ mpi_cfg.s.lsbfirst = (mode & SPI_LSB_FIRST) ? 1 : 0;
+ mpi_cfg.s.wireor = (mode & SPI_3WIRE) ? 1 : 0;
+ mpi_cfg.s.idlelo = cpha != cpol;
+ mpi_cfg.s.cslate = cpha ? 1 : 0;
+ mpi_cfg.s.enable = 1;
+
+ if (spi->chip_select < 4)
+ p->cs_enax |= 1ull << (12 + spi->chip_select);
+ mpi_cfg.u64 |= p->cs_enax;
+
+ if (mpi_cfg.u64 != p->last_cfg) {
+ p->last_cfg = mpi_cfg.u64;
+ cvmx_write_csr(p->register_base + OCTEON_SPI_CFG, mpi_cfg.u64);
+ }
+ tx_buf = xfer->tx_buf;
+ rx_buf = xfer->rx_buf;
+ len = xfer->len;
+ while (len > OCTEON_SPI_MAX_BYTES) {
+ for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
+ u8 d;
+ if (tx_buf)
+ d = *tx_buf++;
+ else
+ d = 0;
+ cvmx_write_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i), d);
+ }
+ mpi_tx.u64 = 0;
+ mpi_tx.s.csid = spi->chip_select;
+ mpi_tx.s.leavecs = 1;
+ mpi_tx.s.txnum = tx_buf ? OCTEON_SPI_MAX_BYTES : 0;
+ mpi_tx.s.totnum = OCTEON_SPI_MAX_BYTES;
+ cvmx_write_csr(p->register_base + OCTEON_SPI_TX, mpi_tx.u64);
+
+ octeon_spi_wait_ready(p);
+ if (rx_buf)
+ for (i = 0; i < OCTEON_SPI_MAX_BYTES; i++) {
+ u64 v = cvmx_read_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i));
+ *rx_buf++ = (u8)v;
+ }
+ len -= OCTEON_SPI_MAX_BYTES;
+ }
+
+ for (i = 0; i < len; i++) {
+ u8 d;
+ if (tx_buf)
+ d = *tx_buf++;
+ else
+ d = 0;
+ cvmx_write_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i), d);
+ }
+
+ mpi_tx.u64 = 0;
+ mpi_tx.s.csid = spi->chip_select;
+ if (last_xfer)
+ mpi_tx.s.leavecs = xfer->cs_change;
+ else
+ mpi_tx.s.leavecs = !xfer->cs_change;
+ mpi_tx.s.txnum = tx_buf ? len : 0;
+ mpi_tx.s.totnum = len;
+ cvmx_write_csr(p->register_base + OCTEON_SPI_TX, mpi_tx.u64);
+
+ octeon_spi_wait_ready(p);
+ if (rx_buf)
+ for (i = 0; i < len; i++) {
+ u64 v = cvmx_read_csr(p->register_base + OCTEON_SPI_DAT0 + (8 * i));
+ *rx_buf++ = (u8)v;
+ }
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ return xfer->len;
+}
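+
+/*
+ * Sizing note (illustrative I/O clock): at 500 MHz the clkdiv computed
+ * above for a 16 MHz transfer is 500000000 / (2 * 16000000) = 15; the
+ * real rate comes from octeon_get_io_clock_rate() at run time.
+ */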
+
+static int octeon_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct octeon_spi *p = spi_master_get_devdata(master);
+ unsigned int total_len = 0;
+ int status = 0;
+ struct spi_transfer *xfer;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ bool last_xfer = list_is_last(&xfer->transfer_list,
+ &msg->transfers);
+ int r = octeon_spi_do_transfer(p, msg, xfer, last_xfer);
+ if (r < 0) {
+ status = r;
+ goto err;
+ }
+ total_len += r;
+ }
+err:
+ msg->status = status;
+ msg->actual_length = total_len;
+ spi_finalize_current_message(master);
+ return status;
+}
+
+static int octeon_spi_probe(struct platform_device *pdev)
+{
+ struct resource *res_mem;
+ struct spi_master *master;
+ struct octeon_spi *p;
+ int err = -ENOENT;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct octeon_spi));
+ if (!master)
+ return -ENOMEM;
+ p = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, master);
+
+ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ if (res_mem == NULL) {
+ dev_err(&pdev->dev, "found no memory resource\n");
+ err = -ENXIO;
+ goto fail;
+ }
+ if (!devm_request_mem_region(&pdev->dev, res_mem->start,
+ resource_size(res_mem), res_mem->name)) {
+ dev_err(&pdev->dev, "request_mem_region failed\n");
+ goto fail;
+ }
+ p->register_base = (u64)devm_ioremap(&pdev->dev, res_mem->start,
+ resource_size(res_mem));
+
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPHA |
+ SPI_CPOL |
+ SPI_CS_HIGH |
+ SPI_LSB_FIRST |
+ SPI_3WIRE;
+
+ master->transfer_one_message = octeon_spi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->max_speed_hz = OCTEON_SPI_MAX_CLOCK_HZ;
+
+ master->dev.of_node = pdev->dev.of_node;
+ err = devm_spi_register_master(&pdev->dev, master);
+ if (err) {
+ dev_err(&pdev->dev, "register master failed: %d\n", err);
+ goto fail;
+ }
+
+ dev_info(&pdev->dev, "OCTEON SPI bus driver\n");
+
+ return 0;
+fail:
+ spi_master_put(master);
+ return err;
+}
+
+static int octeon_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct octeon_spi *p = spi_master_get_devdata(master);
+ u64 register_base = p->register_base;
+
+ /* Clear the CSENA* and put everything in a known state. */
+ cvmx_write_csr(register_base + OCTEON_SPI_CFG, 0);
+
+ return 0;
+}
+
+static const struct of_device_id octeon_spi_match[] = {
+ { .compatible = "cavium,octeon-3010-spi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, octeon_spi_match);
+
+static struct platform_driver octeon_spi_driver = {
+ .driver = {
+ .name = "spi-octeon",
+ .of_match_table = octeon_spi_match,
+ },
+ .probe = octeon_spi_probe,
+ .remove = octeon_spi_remove,
+};
+
+module_platform_driver(octeon_spi_driver);
+
+MODULE_DESCRIPTION("Cavium, Inc. OCTEON SPI bus driver");
+MODULE_AUTHOR("David Daney");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-omap-100k.c b/drivers/spi/spi-omap-100k.c
new file mode 100644
index 000000000..35b332dac
--- /dev/null
+++ b/drivers/spi/spi-omap-100k.c
@@ -0,0 +1,512 @@
+/*
+ * OMAP7xx SPI 100k controller driver
+ * Author: Fabrice Crohas <fcrohas@gmail.com>
+ * from original omap1_mcspi driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
+ * Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+
+#include <linux/spi/spi.h>
+
+#define OMAP1_SPI100K_MAX_FREQ 48000000
+
+#define ICR_SPITAS (OMAP7XX_ICR_BASE + 0x12)
+
+#define SPI_SETUP1 0x00
+#define SPI_SETUP2 0x02
+#define SPI_CTRL 0x04
+#define SPI_STATUS 0x06
+#define SPI_TX_LSB 0x08
+#define SPI_TX_MSB 0x0a
+#define SPI_RX_LSB 0x0c
+#define SPI_RX_MSB 0x0e
+
+#define SPI_SETUP1_INT_READ_ENABLE (1UL << 5)
+#define SPI_SETUP1_INT_WRITE_ENABLE (1UL << 4)
+#define SPI_SETUP1_CLOCK_DIVISOR(x) ((x) << 1)
+#define SPI_SETUP1_CLOCK_ENABLE (1UL << 0)
+
+#define SPI_SETUP2_ACTIVE_EDGE_FALLING (0UL << 0)
+#define SPI_SETUP2_ACTIVE_EDGE_RISING (1UL << 0)
+#define SPI_SETUP2_NEGATIVE_LEVEL (0UL << 5)
+#define SPI_SETUP2_POSITIVE_LEVEL (1UL << 5)
+#define SPI_SETUP2_LEVEL_TRIGGER (0UL << 10)
+#define SPI_SETUP2_EDGE_TRIGGER (1UL << 10)
+
+#define SPI_CTRL_SEN(x) ((x) << 7)
+#define SPI_CTRL_WORD_SIZE(x) (((x) - 1) << 2)
+#define SPI_CTRL_WR (1UL << 1)
+#define SPI_CTRL_RD (1UL << 0)
+
+#define SPI_STATUS_WE (1UL << 1)
+#define SPI_STATUS_RD (1UL << 0)
+
+/* use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache operations; better heuristics consider wordsize and bitrate.
+ */
+#define DMA_MIN_BYTES 8
+
+#define SPI_RUNNING 0
+#define SPI_SHUTDOWN 1
+
+struct omap1_spi100k {
+ struct clk *ick;
+ struct clk *fck;
+
+ /* Virtual base address of the controller */
+ void __iomem *base;
+};
+
+struct omap1_spi100k_cs {
+ void __iomem *base;
+ int word_len;
+};
+
+static void spi100k_enable_clock(struct spi_master *master)
+{
+ unsigned int val;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* enable SPI */
+ val = readw(spi100k->base + SPI_SETUP1);
+ val |= SPI_SETUP1_CLOCK_ENABLE;
+ writew(val, spi100k->base + SPI_SETUP1);
+}
+
+static void spi100k_disable_clock(struct spi_master *master)
+{
+ unsigned int val;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* disable SPI */
+ val = readw(spi100k->base + SPI_SETUP1);
+ val &= ~SPI_SETUP1_CLOCK_ENABLE;
+ writew(val, spi100k->base + SPI_SETUP1);
+}
+
+static void spi100k_write_data(struct spi_master *master, int len, int data)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* write 16-bit word, shifting 8-bit data if necessary */
+ if (len <= 8) {
+ data <<= 8;
+ len = 16;
+ }
+
+ spi100k_enable_clock(master);
+ writew(data, spi100k->base + SPI_TX_MSB);
+
+ writew(SPI_CTRL_SEN(0) |
+ SPI_CTRL_WORD_SIZE(len) |
+ SPI_CTRL_WR,
+ spi100k->base + SPI_CTRL);
+
+ /* Wait for the write-enable status bit to acknowledge the send */
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_WE) != SPI_STATUS_WE)
+ ;
+ udelay(1000);
+
+ spi100k_disable_clock(master);
+}
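+
+/*
+ * Example: an 8-bit write of 0xa5 is shifted up to 0xa500 and clocked
+ * out as one 16-bit frame, since the controller transfers at least
+ * 16 bits per word.
+ */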
+
+static int spi100k_read_data(struct spi_master *master, int len)
+{
+ int dataH, dataL;
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ /* Always do at least 16 bits */
+ if (len <= 8)
+ len = 16;
+
+ spi100k_enable_clock(master);
+ writew(SPI_CTRL_SEN(0) |
+ SPI_CTRL_WORD_SIZE(len) |
+ SPI_CTRL_RD,
+ spi100k->base + SPI_CTRL);
+
+ while ((readw(spi100k->base + SPI_STATUS) & SPI_STATUS_RD) != SPI_STATUS_RD)
+ ;
+ udelay(1000);
+
+ dataL = readw(spi100k->base + SPI_RX_LSB);
+ dataH = readw(spi100k->base + SPI_RX_MSB);
+ spi100k_disable_clock(master);
+
+ return dataL;
+}
+
+static void spi100k_open(struct spi_master *master)
+{
+ /* get control of SPI */
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ writew(SPI_SETUP1_INT_READ_ENABLE |
+ SPI_SETUP1_INT_WRITE_ENABLE |
+ SPI_SETUP1_CLOCK_DIVISOR(0), spi100k->base + SPI_SETUP1);
+
+ /* configure clock and interrupts */
+ writew(SPI_SETUP2_ACTIVE_EDGE_FALLING |
+ SPI_SETUP2_NEGATIVE_LEVEL |
+ SPI_SETUP2_LEVEL_TRIGGER, spi100k->base + SPI_SETUP2);
+}
+
+static void omap1_spi100k_force_cs(struct omap1_spi100k *spi100k, int enable)
+{
+ if (enable)
+ writew(0x05fc, spi100k->base + SPI_CTRL);
+ else
+ writew(0x05fd, spi100k->base + SPI_CTRL);
+}
+
+static unsigned
+omap1_spi100k_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+ unsigned int count, c;
+ int word_len;
+
+ count = xfer->len;
+ c = count;
+ word_len = cs->word_len;
+
+ if (word_len <= 8) {
+ u8 *rx;
+ const u8 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 1;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx++);
+ if (xfer->rx_buf != NULL)
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ } else if (word_len <= 16) {
+ u16 *rx;
+ const u16 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 2;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx++);
+ if (xfer->rx_buf != NULL)
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ } else if (word_len <= 32) {
+ u32 *rx;
+ const u32 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 4;
+ if (xfer->tx_buf != NULL)
+ spi100k_write_data(spi->master, word_len, *tx++);
+ if (xfer->rx_buf != NULL)
+ *rx++ = spi100k_read_data(spi->master, word_len);
+ } while (c);
+ }
+ return count - c;
+}
+
+/* called only when no transfer is active on this device */
+static int omap1_spi100k_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(spi->master);
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+ u8 word_len = spi->bits_per_word;
+
+ if (t != NULL && t->bits_per_word)
+ word_len = t->bits_per_word;
+ if (!word_len)
+ word_len = 8;
+
+ if (spi->bits_per_word > 32)
+ return -EINVAL;
+ cs->word_len = word_len;
+
+ /* SPI init before transfer */
+ writew(0x3e, spi100k->base + SPI_SETUP1);
+ writew(0x00, spi100k->base + SPI_STATUS);
+ writew(0x3e, spi100k->base + SPI_CTRL);
+
+ return 0;
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
+static int omap1_spi100k_setup(struct spi_device *spi)
+{
+ int ret;
+ struct omap1_spi100k *spi100k;
+ struct omap1_spi100k_cs *cs = spi->controller_state;
+
+ spi100k = spi_master_get_devdata(spi->master);
+
+ if (!cs) {
+ cs = devm_kzalloc(&spi->dev, sizeof(*cs), GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ cs->base = spi100k->base + spi->chip_select * 0x14;
+ spi->controller_state = cs;
+ }
+
+ spi100k_open(spi->master);
+
+ clk_prepare_enable(spi100k->ick);
+ clk_prepare_enable(spi100k->fck);
+
+ ret = omap1_spi100k_setup_transfer(spi, NULL);
+
+ clk_disable_unprepare(spi100k->ick);
+ clk_disable_unprepare(spi100k->fck);
+
+ return ret;
+}
+
+static int omap1_spi100k_transfer_one_message(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t = NULL;
+ int cs_active = 0;
+ int par_override = 0;
+ int status = 0;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+ status = -EINVAL;
+ break;
+ }
+ if (par_override || t->speed_hz || t->bits_per_word) {
+ par_override = 1;
+ status = omap1_spi100k_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ if (!t->speed_hz && !t->bits_per_word)
+ par_override = 0;
+ }
+
+ if (!cs_active) {
+ omap1_spi100k_force_cs(spi100k, 1);
+ cs_active = 1;
+ }
+
+ if (t->len) {
+ unsigned count;
+
+ count = omap1_spi100k_txrx_pio(spi, t);
+ m->actual_length += count;
+
+ if (count != t->len) {
+ status = -EIO;
+ break;
+ }
+ }
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ /* ignore the "leave it on after last xfer" hint */
+
+ if (t->cs_change) {
+ omap1_spi100k_force_cs(spi100k, 0);
+ cs_active = 0;
+ }
+ }
+
+ /* Restore defaults if they were overridden */
+ if (par_override) {
+ par_override = 0;
+ status = omap1_spi100k_setup_transfer(spi, NULL);
+ }
+
+ if (cs_active)
+ omap1_spi100k_force_cs(spi100k, 0);
+
+ m->status = status;
+
+ spi_finalize_current_message(master);
+
+ return status;
+}
+
+static int omap1_spi100k_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct omap1_spi100k *spi100k;
+ int status = 0;
+
+ if (!pdev->id)
+ return -EINVAL;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi100k));
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+
+ master->setup = omap1_spi100k_setup;
+ master->transfer_one_message = omap1_spi100k_transfer_one_message;
+ master->num_chipselect = 2;
+ master->mode_bits = MODEBITS;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->min_speed_hz = OMAP1_SPI100K_MAX_FREQ/(1<<16);
+ master->max_speed_hz = OMAP1_SPI100K_MAX_FREQ;
+ master->auto_runtime_pm = true;
+
+ spi100k = spi_master_get_devdata(master);
+
+ /*
+ * The memory region base address is taken as the platform_data.
+ * You should allocate this with ioremap() before initializing
+ * the SPI.
+ */
+ spi100k->base = (void __iomem *)dev_get_platdata(&pdev->dev);
+
+ spi100k->ick = devm_clk_get(&pdev->dev, "ick");
+ if (IS_ERR(spi100k->ick)) {
+ dev_dbg(&pdev->dev, "can't get spi100k_ick\n");
+ status = PTR_ERR(spi100k->ick);
+ goto err;
+ }
+
+ spi100k->fck = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(spi100k->fck)) {
+ dev_dbg(&pdev->dev, "can't get spi100k_fck\n");
+ status = PTR_ERR(spi100k->fck);
+ goto err;
+ }
+
+ status = clk_prepare_enable(spi100k->ick);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to enable ick: %d\n", status);
+ goto err;
+ }
+
+ status = clk_prepare_enable(spi100k->fck);
+ if (status != 0) {
+ dev_err(&pdev->dev, "failed to enable fck: %d\n", status);
+ goto err_ick;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+
+ status = devm_spi_register_master(&pdev->dev, master);
+ if (status < 0)
+ goto err_fck;
+
+ return status;
+
+err_fck:
+ clk_disable_unprepare(spi100k->fck);
+err_ick:
+ clk_disable_unprepare(spi100k->ick);
+err:
+ spi_master_put(master);
+ return status;
+}
+
+static int omap1_spi100k_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(spi100k->fck);
+ clk_disable_unprepare(spi100k->ick);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int omap1_spi100k_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spi100k->ick);
+ clk_disable_unprepare(spi100k->fck);
+
+ return 0;
+}
+
+static int omap1_spi100k_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = spi_master_get(dev_get_drvdata(dev));
+ struct omap1_spi100k *spi100k = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(spi100k->ick);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable ick: %d\n", ret);
+ return ret;
+ }
+
+ ret = clk_prepare_enable(spi100k->fck);
+ if (ret != 0) {
+ dev_err(dev, "Failed to enable fck: %d\n", ret);
+ clk_disable_unprepare(spi100k->ick);
+ return ret;
+ }
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops omap1_spi100k_pm = {
+ SET_RUNTIME_PM_OPS(omap1_spi100k_runtime_suspend,
+ omap1_spi100k_runtime_resume, NULL)
+};
+
+static struct platform_driver omap1_spi100k_driver = {
+ .driver = {
+ .name = "omap1_spi100k",
+ .pm = &omap1_spi100k_pm,
+ },
+ .probe = omap1_spi100k_probe,
+ .remove = omap1_spi100k_remove,
+};
+
+module_platform_driver(omap1_spi100k_driver);
+
+MODULE_DESCRIPTION("OMAP7xx SPI 100k controller driver");
+MODULE_AUTHOR("Fabrice Crohas <fcrohas@gmail.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-omap-uwire.c b/drivers/spi/spi-omap-uwire.c
new file mode 100644
index 000000000..55576db31
--- /dev/null
+++ b/drivers/spi/spi-omap-uwire.c
@@ -0,0 +1,561 @@
+/*
+ * MicroWire interface driver for OMAP
+ *
+ * Copyright 2003 MontaVista Software Inc. <source@mvista.com>
+ *
+ * Ported to 2.6 OMAP uwire interface.
+ * Copyright (C) 2004 Texas Instruments.
+ *
+ * Generalization patches by Juha Yrjola <juha.yrjola@nokia.com>
+ *
+ * Copyright (C) 2005 David Brownell (ported to 2.6 SPI interface)
+ * Copyright (C) 2006 Nokia
+ *
+ * Many updates by Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+ * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/module.h>
+#include <linux/io.h>
+
+#include <mach/hardware.h>
+#include <asm/mach-types.h>
+
+#include <mach/mux.h>
+
+#include <mach/omap7xx.h> /* OMAP7XX_IO_CONF registers */
+
+
+/* FIXME address is now a platform device resource,
+ * and irqs should show there too...
+ */
+#define UWIRE_BASE_PHYS 0xFFFB3000
+
+/* uWire Registers: */
+#define UWIRE_IO_SIZE 0x20
+#define UWIRE_TDR 0x00
+#define UWIRE_RDR 0x00
+#define UWIRE_CSR 0x01
+#define UWIRE_SR1 0x02
+#define UWIRE_SR2 0x03
+#define UWIRE_SR3 0x04
+#define UWIRE_SR4 0x05
+#define UWIRE_SR5 0x06
+
+/* CSR bits */
+#define RDRB (1 << 15)
+#define CSRB (1 << 14)
+#define START (1 << 13)
+#define CS_CMD (1 << 12)
+
+/* SR1 or SR2 bits */
+#define UWIRE_READ_FALLING_EDGE 0x0001
+#define UWIRE_READ_RISING_EDGE 0x0000
+#define UWIRE_WRITE_FALLING_EDGE 0x0000
+#define UWIRE_WRITE_RISING_EDGE 0x0002
+#define UWIRE_CS_ACTIVE_LOW 0x0000
+#define UWIRE_CS_ACTIVE_HIGH 0x0004
+#define UWIRE_FREQ_DIV_2 0x0000
+#define UWIRE_FREQ_DIV_4 0x0008
+#define UWIRE_FREQ_DIV_8 0x0010
+#define UWIRE_CHK_READY 0x0020
+#define UWIRE_CLK_INVERTED 0x0040
+
+
+struct uwire_spi {
+ struct spi_bitbang bitbang;
+ struct clk *ck;
+};
+
+struct uwire_state {
+ unsigned div1_idx;
+};
+
+/* REVISIT compile time constant for idx_shift? */
+/*
+ * Or, put it in a structure which is used throughout the driver;
+ * that avoids having to issue two loads for each bit of static data.
+ */
+static unsigned int uwire_idx_shift;
+static void __iomem *uwire_base;
+
+static inline void uwire_write_reg(int idx, u16 val)
+{
+ __raw_writew(val, uwire_base + (idx << uwire_idx_shift));
+}
+
+static inline u16 uwire_read_reg(int idx)
+{
+ return __raw_readw(uwire_base + (idx << uwire_idx_shift));
+}
+
+static inline void omap_uwire_configure_mode(u8 cs, unsigned long flags)
+{
+ u16 w, val = 0;
+ int shift, reg;
+
+ /* the low six flag bits map straight onto the SRx mode field */
+ val = flags & 0x3f;
+ if (cs & 1)
+ shift = 6;
+ else
+ shift = 0;
+ if (cs <= 1)
+ reg = UWIRE_SR1;
+ else
+ reg = UWIRE_SR2;
+
+ w = uwire_read_reg(reg);
+ w &= ~(0x3f << shift);
+ w |= val << shift;
+ uwire_write_reg(reg, w);
+}
+
+static int wait_uwire_csr_flag(u16 mask, u16 val, int might_not_catch)
+{
+ u16 w;
+ int c = 0;
+ unsigned long max_jiffies = jiffies + HZ;
+
+ for (;;) {
+ w = uwire_read_reg(UWIRE_CSR);
+ if ((w & mask) == val)
+ break;
+ if (time_after(jiffies, max_jiffies)) {
+ printk(KERN_ERR "%s: timeout. reg=%#06x "
+ "mask=%#06x val=%#06x\n",
+ __func__, w, mask, val);
+ return -1;
+ }
+ c++;
+ if (might_not_catch && c > 64)
+ break;
+ }
+ return 0;
+}
+
+static void uwire_set_clk1_div(int div1_idx)
+{
+ u16 w;
+
+ w = uwire_read_reg(UWIRE_SR3);
+ w &= ~(0x03 << 1);
+ w |= div1_idx << 1;
+ uwire_write_reg(UWIRE_SR3, w);
+}
+
+static void uwire_chipselect(struct spi_device *spi, int value)
+{
+ struct uwire_state *ust = spi->controller_state;
+ u16 w;
+ int old_cs;
+
+
+ BUG_ON(wait_uwire_csr_flag(CSRB, 0, 0));
+
+ w = uwire_read_reg(UWIRE_CSR);
+ old_cs = (w >> 10) & 0x03;
+ if (value == BITBANG_CS_INACTIVE || old_cs != spi->chip_select) {
+ /* Deselect this CS, or the previous CS */
+ w &= ~CS_CMD;
+ uwire_write_reg(UWIRE_CSR, w);
+ }
+ /* activate the specified chipselect */
+ if (value == BITBANG_CS_ACTIVE) {
+ uwire_set_clk1_div(ust->div1_idx);
+ /* invert clock? */
+ if (spi->mode & SPI_CPOL)
+ uwire_write_reg(UWIRE_SR4, 1);
+ else
+ uwire_write_reg(UWIRE_SR4, 0);
+
+ w = spi->chip_select << 10;
+ w |= CS_CMD;
+ uwire_write_reg(UWIRE_CSR, w);
+ }
+}
+
+static int uwire_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ unsigned len = t->len;
+ unsigned bits = t->bits_per_word ? : spi->bits_per_word;
+ unsigned bytes;
+ u16 val, w;
+ int status = 0;
+
+ if (!t->tx_buf && !t->rx_buf)
+ return 0;
+
+ w = spi->chip_select << 10;
+ w |= CS_CMD;
+
+ if (t->tx_buf) {
+ const u8 *buf = t->tx_buf;
+
+ /* NOTE: DMA could be used for TX transfers */
+
+ /* write one or two bytes at a time */
+ while (len >= 1) {
+ /* tx bit 15 is first sent; we byteswap multibyte words
+ * (msb-first) on the way out from memory.
+ */
+ val = *buf++;
+ if (bits > 8) {
+ bytes = 2;
+ val |= *buf++ << 8;
+ } else
+ bytes = 1;
+ val <<= 16 - bits;
+
+#ifdef VERBOSE
+ pr_debug("%s: write-%d =%04x\n",
+ dev_name(&spi->dev), bits, val);
+#endif
+ if (wait_uwire_csr_flag(CSRB, 0, 0))
+ goto eio;
+
+ uwire_write_reg(UWIRE_TDR, val);
+
+ /* start write */
+ val = START | w | (bits << 5);
+
+ uwire_write_reg(UWIRE_CSR, val);
+ len -= bytes;
+
+ /* Wait till write actually starts.
+ * This is needed with MPU clock 60+ MHz.
+ * REVISIT: we may not have time to catch it...
+ */
+ if (wait_uwire_csr_flag(CSRB, CSRB, 1))
+ goto eio;
+
+ status += bytes;
+ }
+
+ /* REVISIT: save this for later to get more i/o overlap */
+ if (wait_uwire_csr_flag(CSRB, 0, 0))
+ goto eio;
+
+ } else if (t->rx_buf) {
+ u8 *buf = t->rx_buf;
+
+ /* read one or two bytes at a time */
+ while (len) {
+ if (bits > 8) {
+ bytes = 2;
+ } else
+ bytes = 1;
+
+ /* start read */
+ val = START | w | (bits << 0);
+ uwire_write_reg(UWIRE_CSR, val);
+ len -= bytes;
+
+ /* Wait till read actually starts */
+ (void) wait_uwire_csr_flag(CSRB, CSRB, 1);
+
+ if (wait_uwire_csr_flag(RDRB | CSRB,
+ RDRB, 0))
+ goto eio;
+
+ /* rx bit 0 is last received; multibyte words will
+ * be properly byteswapped on the way to memory.
+ */
+ val = uwire_read_reg(UWIRE_RDR);
+ val &= (1 << bits) - 1;
+ *buf++ = (u8) val;
+ if (bytes == 2)
+ *buf++ = val >> 8;
+ status += bytes;
+#ifdef VERBOSE
+ pr_debug("%s: read-%d =%04x\n",
+ dev_name(&spi->dev), bits, val);
+#endif
+
+ }
+ }
+ return status;
+eio:
+ return -EIO;
+}
+
+static int uwire_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct uwire_state *ust = spi->controller_state;
+ struct uwire_spi *uwire;
+ unsigned flags = 0;
+ unsigned hz;
+ unsigned long rate;
+ int div1_idx;
+ int div1;
+ int div2;
+ int status;
+
+ uwire = spi_master_get_devdata(spi->master);
+
+ /* mode 0..3, clock inverted separately;
+ * standard nCS signaling;
+ * don't treat DI=high as "not ready"
+ */
+ if (spi->mode & SPI_CS_HIGH)
+ flags |= UWIRE_CS_ACTIVE_HIGH;
+
+ if (spi->mode & SPI_CPOL)
+ flags |= UWIRE_CLK_INVERTED;
+
+ switch (spi->mode & (SPI_CPOL | SPI_CPHA)) {
+ case SPI_MODE_0:
+ case SPI_MODE_3:
+ flags |= UWIRE_WRITE_FALLING_EDGE | UWIRE_READ_RISING_EDGE;
+ break;
+ case SPI_MODE_1:
+ case SPI_MODE_2:
+ flags |= UWIRE_WRITE_RISING_EDGE | UWIRE_READ_FALLING_EDGE;
+ break;
+ }
+
+ /* assume it's already enabled */
+ rate = clk_get_rate(uwire->ck);
+
+ hz = spi->max_speed_hz;
+ if (t != NULL && t->speed_hz)
+ hz = t->speed_hz;
+
+ if (!hz) {
+ pr_debug("%s: zero speed?\n", dev_name(&spi->dev));
+ status = -EINVAL;
+ goto done;
+ }
+
+ /* F_INT = mpu_xor_clk / DIV1 */
+ for (div1_idx = 0; div1_idx < 4; div1_idx++) {
+ switch (div1_idx) {
+ case 0:
+ div1 = 2;
+ break;
+ case 1:
+ div1 = 4;
+ break;
+ case 2:
+ div1 = 7;
+ break;
+ default:
+ case 3:
+ div1 = 10;
+ break;
+ }
+ div2 = (rate / div1 + hz - 1) / hz;
+ if (div2 <= 8)
+ break;
+ }
+ if (div1_idx == 4) {
+ pr_debug("%s: lowest clock %ld, need %d\n",
+ dev_name(&spi->dev), rate / 10 / 8, hz);
+ status = -EDOM;
+ goto done;
+ }
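+ /* Example: with rate = 48 MHz and hz = 1 MHz, div1 = 2 and 4 yield
+ * div2 = 24 and 12 (both > 8); div1 = 7 yields div2 = 7, so
+ * F_INT = 48 MHz / 7 ~= 6.86 MHz and the /8 divider chosen below
+ * gives SCK ~= 857 kHz.
+ */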
+
+ /* we have to cache this and restore it in uwire_chipselect(), as
+ * this is a global parameter and another uWire device can change
+ * it under us */
+ ust->div1_idx = div1_idx;
+ uwire_set_clk1_div(div1_idx);
+
+ rate /= div1;
+
+ switch (div2) {
+ case 0:
+ case 1:
+ case 2:
+ flags |= UWIRE_FREQ_DIV_2;
+ rate /= 2;
+ break;
+ case 3:
+ case 4:
+ flags |= UWIRE_FREQ_DIV_4;
+ rate /= 4;
+ break;
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ flags |= UWIRE_FREQ_DIV_8;
+ rate /= 8;
+ break;
+ }
+ omap_uwire_configure_mode(spi->chip_select, flags);
+ pr_debug("%s: uwire flags %02x, armxor %lu KHz, SCK %lu KHz\n",
+ __func__, flags,
+ clk_get_rate(uwire->ck) / 1000,
+ rate / 1000);
+ status = 0;
+done:
+ return status;
+}
+
+static int uwire_setup(struct spi_device *spi)
+{
+ struct uwire_state *ust = spi->controller_state;
+
+ if (ust == NULL) {
+ ust = kzalloc(sizeof(*ust), GFP_KERNEL);
+ if (ust == NULL)
+ return -ENOMEM;
+ spi->controller_state = ust;
+ }
+
+ return uwire_setup_transfer(spi, NULL);
+}
+
+static void uwire_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+
+static void uwire_off(struct uwire_spi *uwire)
+{
+ uwire_write_reg(UWIRE_SR3, 0);
+ clk_disable(uwire->ck);
+ spi_master_put(uwire->bitbang.master);
+}
+
+static int uwire_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct uwire_spi *uwire;
+ int status;
+
+ master = spi_alloc_master(&pdev->dev, sizeof *uwire);
+ if (!master)
+ return -ENODEV;
+
+ uwire = spi_master_get_devdata(master);
+
+ uwire_base = devm_ioremap(&pdev->dev, UWIRE_BASE_PHYS, UWIRE_IO_SIZE);
+ if (!uwire_base) {
+ dev_dbg(&pdev->dev, "can't ioremap UWIRE\n");
+ spi_master_put(master);
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, uwire);
+
+ uwire->ck = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(uwire->ck)) {
+ status = PTR_ERR(uwire->ck);
+ dev_dbg(&pdev->dev, "no functional clock?\n");
+ spi_master_put(master);
+ return status;
+ }
+ clk_enable(uwire->ck);
+
+ if (cpu_is_omap7xx())
+ uwire_idx_shift = 1;
+ else
+ uwire_idx_shift = 2;
+
+ uwire_write_reg(UWIRE_SR3, 1);
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+
+ master->bus_num = 2; /* "official" */
+ master->num_chipselect = 4;
+ master->setup = uwire_setup;
+ master->cleanup = uwire_cleanup;
+
+ uwire->bitbang.master = master;
+ uwire->bitbang.chipselect = uwire_chipselect;
+ uwire->bitbang.setup_transfer = uwire_setup_transfer;
+ uwire->bitbang.txrx_bufs = uwire_txrx;
+
+ status = spi_bitbang_start(&uwire->bitbang);
+ if (status < 0)
+ uwire_off(uwire);
+ return status;
+}
+
+static int uwire_remove(struct platform_device *pdev)
+{
+ struct uwire_spi *uwire = platform_get_drvdata(pdev);
+
+ // FIXME remove all child devices, somewhere ...
+
+ spi_bitbang_stop(&uwire->bitbang);
+ uwire_off(uwire);
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:omap_uwire");
+
+static struct platform_driver uwire_driver = {
+ .driver = {
+ .name = "omap_uwire",
+ },
+ .probe = uwire_probe,
+ .remove = uwire_remove,
+ // suspend ... unuse ck
+ // resume ... use ck
+};
+
+static int __init omap_uwire_init(void)
+{
+ /* FIXME move these into the relevant board init code. also, include
+ * H3 support; it uses tsc2101 like H2 (on a different chipselect).
+ */
+
+ if (machine_is_omap_h2()) {
+ /* defaults: W21 SDO, U18 SDI, V19 SCL */
+ omap_cfg_reg(N14_1610_UWIRE_CS0);
+ omap_cfg_reg(N15_1610_UWIRE_CS1);
+ }
+ if (machine_is_omap_perseus2()) {
+ /* configure pins: MPU_UW_nSCS1, MPU_UW_SDO, MPU_UW_SCLK */
+ int val = omap_readl(OMAP7XX_IO_CONF_9) & ~0x00EEE000;
+ omap_writel(val | 0x00AAA000, OMAP7XX_IO_CONF_9);
+ }
+
+ return platform_driver_register(&uwire_driver);
+}
+
+static void __exit omap_uwire_exit(void)
+{
+ platform_driver_unregister(&uwire_driver);
+}
+
+subsys_initcall(omap_uwire_init);
+module_exit(omap_uwire_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
new file mode 100644
index 000000000..d1a5b9fc3
--- /dev/null
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -0,0 +1,1535 @@
+/*
+ * OMAP2 McSPI controller driver
+ *
+ * Copyright (C) 2005, 2006 Nokia Corporation
+ * Author: Samuel Ortiz <samuel.ortiz@nokia.com> and
+ * Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/gcd.h>
+
+#include <linux/spi/spi.h>
+
+#include <linux/platform_data/spi-omap2-mcspi.h>
+
+#define OMAP2_MCSPI_MAX_FREQ 48000000
+#define OMAP2_MCSPI_MAX_DIVIDER 4096
+#define OMAP2_MCSPI_MAX_FIFODEPTH 64
+#define OMAP2_MCSPI_MAX_FIFOWCNT 0xFFFF
+#define SPI_AUTOSUSPEND_TIMEOUT 2000
+
+#define OMAP2_MCSPI_REVISION 0x00
+#define OMAP2_MCSPI_SYSSTATUS 0x14
+#define OMAP2_MCSPI_IRQSTATUS 0x18
+#define OMAP2_MCSPI_IRQENABLE 0x1c
+#define OMAP2_MCSPI_WAKEUPENABLE 0x20
+#define OMAP2_MCSPI_SYST 0x24
+#define OMAP2_MCSPI_MODULCTRL 0x28
+#define OMAP2_MCSPI_XFERLEVEL 0x7c
+
+/* per-channel banks, 0x14 bytes each, first is: */
+#define OMAP2_MCSPI_CHCONF0 0x2c
+#define OMAP2_MCSPI_CHSTAT0 0x30
+#define OMAP2_MCSPI_CHCTRL0 0x34
+#define OMAP2_MCSPI_TX0 0x38
+#define OMAP2_MCSPI_RX0 0x3c
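+/* e.g. channel 2: CHCONF2 = 0x2c + 2 * 0x14 = 0x54 */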
+
+/* per-register bitmasks: */
+#define OMAP2_MCSPI_IRQSTATUS_EOW BIT(17)
+
+#define OMAP2_MCSPI_MODULCTRL_SINGLE BIT(0)
+#define OMAP2_MCSPI_MODULCTRL_MS BIT(2)
+#define OMAP2_MCSPI_MODULCTRL_STEST BIT(3)
+
+#define OMAP2_MCSPI_CHCONF_PHA BIT(0)
+#define OMAP2_MCSPI_CHCONF_POL BIT(1)
+#define OMAP2_MCSPI_CHCONF_CLKD_MASK (0x0f << 2)
+#define OMAP2_MCSPI_CHCONF_EPOL BIT(6)
+#define OMAP2_MCSPI_CHCONF_WL_MASK (0x1f << 7)
+#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY BIT(12)
+#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY BIT(13)
+#define OMAP2_MCSPI_CHCONF_TRM_MASK (0x03 << 12)
+#define OMAP2_MCSPI_CHCONF_DMAW BIT(14)
+#define OMAP2_MCSPI_CHCONF_DMAR BIT(15)
+#define OMAP2_MCSPI_CHCONF_DPE0 BIT(16)
+#define OMAP2_MCSPI_CHCONF_DPE1 BIT(17)
+#define OMAP2_MCSPI_CHCONF_IS BIT(18)
+#define OMAP2_MCSPI_CHCONF_TURBO BIT(19)
+#define OMAP2_MCSPI_CHCONF_FORCE BIT(20)
+#define OMAP2_MCSPI_CHCONF_FFET BIT(27)
+#define OMAP2_MCSPI_CHCONF_FFER BIT(28)
+#define OMAP2_MCSPI_CHCONF_CLKG BIT(29)
+
+#define OMAP2_MCSPI_CHSTAT_RXS BIT(0)
+#define OMAP2_MCSPI_CHSTAT_TXS BIT(1)
+#define OMAP2_MCSPI_CHSTAT_EOT BIT(2)
+#define OMAP2_MCSPI_CHSTAT_TXFFE BIT(3)
+
+#define OMAP2_MCSPI_CHCTRL_EN BIT(0)
+#define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK (0xff << 8)
+
+#define OMAP2_MCSPI_WAKEUPENABLE_WKEN BIT(0)
+
+/* We have 2 DMA channels per CS, one for RX and one for TX */
+struct omap2_mcspi_dma {
+ struct dma_chan *dma_tx;
+ struct dma_chan *dma_rx;
+
+ int dma_tx_sync_dev;
+ int dma_rx_sync_dev;
+
+ struct completion dma_tx_completion;
+ struct completion dma_rx_completion;
+
+ char dma_rx_ch_name[14];
+ char dma_tx_ch_name[14];
+};
+
+/* Use PIO for small transfers, avoiding DMA setup/teardown overhead and
+ * cache maintenance; a better heuristic would also consider word size
+ * and bit rate.
+ */
+#define DMA_MIN_BYTES 160
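+
+/* One possible refinement (an illustrative sketch only, not what this
+ * driver does): compare word counts rather than byte counts, since PIO
+ * cost scales with the number of register accesses, not bytes. Both
+ * DMA_MIN_WORDS and the helper are hypothetical:
+ *
+ * #define DMA_MIN_WORDS 160
+ *
+ * static bool omap2_mcspi_use_dma(struct omap2_mcspi_cs *cs,
+ * unsigned int len)
+ * {
+ * return len / mcspi_bytes_per_word(cs->word_len) >= DMA_MIN_WORDS;
+ * }
+ */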
+
+
+/*
+ * Used for context save and restore; these structure members must be
+ * updated whenever the corresponding registers are modified.
+ */
+struct omap2_mcspi_regs {
+ u32 modulctrl;
+ u32 wakeupenable;
+ struct list_head cs;
+};
+
+struct omap2_mcspi {
+ struct spi_master *master;
+ /* Virtual base address of the controller */
+ void __iomem *base;
+ unsigned long phys;
+ /* SPI1 has 4 channels, while SPI2 has 2 */
+ struct omap2_mcspi_dma *dma_channels;
+ struct device *dev;
+ struct omap2_mcspi_regs ctx;
+ int fifo_depth;
+ unsigned int pin_dir:1;
+};
+
+struct omap2_mcspi_cs {
+ void __iomem *base;
+ unsigned long phys;
+ int word_len;
+ u16 mode;
+ struct list_head node;
+ /* Context save and restore shadow register */
+ u32 chconf0, chctrl0;
+};
+
+static inline void mcspi_write_reg(struct spi_master *master,
+ int idx, u32 val)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ writel_relaxed(val, mcspi->base + idx);
+}
+
+static inline u32 mcspi_read_reg(struct spi_master *master, int idx)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ return readl_relaxed(mcspi->base + idx);
+}
+
+static inline void mcspi_write_cs_reg(const struct spi_device *spi,
+ int idx, u32 val)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ writel_relaxed(val, cs->base + idx);
+}
+
+static inline u32 mcspi_read_cs_reg(const struct spi_device *spi, int idx)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ return readl_relaxed(cs->base + idx);
+}
+
+static inline u32 mcspi_cached_chconf0(const struct spi_device *spi)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ return cs->chconf0;
+}
+
+static inline void mcspi_write_chconf0(const struct spi_device *spi, u32 val)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ cs->chconf0 = val;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCONF0, val);
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCONF0);
+}
+
+static inline int mcspi_bytes_per_word(int word_len)
+{
+ if (word_len <= 8)
+ return 1;
+ else if (word_len <= 16)
+ return 2;
+ else /* word_len <= 32 */
+ return 4;
+}
+
+static void omap2_mcspi_set_dma_req(const struct spi_device *spi,
+ int is_read, int enable)
+{
+ u32 l, rw;
+
+ l = mcspi_cached_chconf0(spi);
+
+ if (is_read) /* 1 is read, 0 write */
+ rw = OMAP2_MCSPI_CHCONF_DMAR;
+ else
+ rw = OMAP2_MCSPI_CHCONF_DMAW;
+
+ if (enable)
+ l |= rw;
+ else
+ l &= ~rw;
+
+ mcspi_write_chconf0(spi, l);
+}
+
+static void omap2_mcspi_set_enable(const struct spi_device *spi, int enable)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ u32 l;
+
+ l = cs->chctrl0;
+ if (enable)
+ l |= OMAP2_MCSPI_CHCTRL_EN;
+ else
+ l &= ~OMAP2_MCSPI_CHCTRL_EN;
+ cs->chctrl0 = l;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
+ /* Flush posted writes */
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHCTRL0);
+}
+
+static void omap2_mcspi_force_cs(struct spi_device *spi, int cs_active)
+{
+ u32 l;
+
+ l = mcspi_cached_chconf0(spi);
+ if (cs_active)
+ l |= OMAP2_MCSPI_CHCONF_FORCE;
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_FORCE;
+
+ mcspi_write_chconf0(spi, l);
+}
+
+static void omap2_mcspi_set_master_mode(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ u32 l;
+
+ /*
+ * Setup when switching from (reset default) slave mode
+ * to single-channel master mode
+ */
+ l = mcspi_read_reg(master, OMAP2_MCSPI_MODULCTRL);
+ l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
+ l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, l);
+
+ ctx->modulctrl = l;
+}
+
+static void omap2_mcspi_set_fifo(const struct spi_device *spi,
+ struct spi_transfer *t, int enable)
+{
+ struct spi_master *master = spi->master;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi *mcspi;
+ unsigned int wcnt;
+ int max_fifo_depth, fifo_depth, bytes_per_word;
+ u32 chconf, xferlevel;
+
+ mcspi = spi_master_get_devdata(master);
+
+ chconf = mcspi_cached_chconf0(spi);
+ if (enable) {
+ bytes_per_word = mcspi_bytes_per_word(cs->word_len);
+ if (t->len % bytes_per_word != 0)
+ goto disable_fifo;
+
+ if (t->rx_buf != NULL && t->tx_buf != NULL)
+ max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
+ else
+ max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH;
+
+ fifo_depth = gcd(t->len, max_fifo_depth);
+ if (fifo_depth < 2 || fifo_depth % bytes_per_word != 0)
+ goto disable_fifo;
+
+ wcnt = t->len / bytes_per_word;
+ if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT)
+ goto disable_fifo;
+
+ xferlevel = wcnt << 16;
+ if (t->rx_buf != NULL) {
+ chconf |= OMAP2_MCSPI_CHCONF_FFER;
+ xferlevel |= (fifo_depth - 1) << 8;
+ }
+ if (t->tx_buf != NULL) {
+ chconf |= OMAP2_MCSPI_CHCONF_FFET;
+ xferlevel |= fifo_depth - 1;
+ }
+
+ mcspi_write_reg(master, OMAP2_MCSPI_XFERLEVEL, xferlevel);
+ mcspi_write_chconf0(spi, chconf);
+ mcspi->fifo_depth = fifo_depth;
+
+ return;
+ }
+
+disable_fifo:
+ if (t->rx_buf != NULL)
+ chconf &= ~OMAP2_MCSPI_CHCONF_FFER;
+
+ if (t->tx_buf != NULL)
+ chconf &= ~OMAP2_MCSPI_CHCONF_FFET;
+
+ mcspi_write_chconf0(spi, chconf);
+ mcspi->fifo_depth = 0;
+}
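+/* Example: a 100-byte full-duplex transfer with 8-bit words gets
+ * max_fifo_depth = 64 / 2 = 32 and fifo_depth = gcd(100, 32) = 4, so
+ * XFERLEVEL is written with wcnt = 100 in its upper half and
+ * fifo_depth - 1 = 3 in each trigger-level field.
+ */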
+
+static void omap2_mcspi_restore_ctx(struct omap2_mcspi *mcspi)
+{
+ struct spi_master *spi_cntrl = mcspi->master;
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
+
+ /* McSPI: context restore */
+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_MODULCTRL, ctx->modulctrl);
+ mcspi_write_reg(spi_cntrl, OMAP2_MCSPI_WAKEUPENABLE, ctx->wakeupenable);
+
+ list_for_each_entry(cs, &ctx->cs, node)
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+}
+
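+/* Poll @reg until @bit is set, for at most one second. The bit is
+ * sampled once more after the deadline expires, so that a long
+ * preemption between the register read and the time check is not
+ * misreported as a timeout.
+ */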
+static int mcspi_wait_for_reg_bit(void __iomem *reg, unsigned long bit)
+{
+ unsigned long timeout;
+
+ timeout = jiffies + msecs_to_jiffies(1000);
+ while (!(readl_relaxed(reg) & bit)) {
+ if (time_after(jiffies, timeout)) {
+ if (!(readl_relaxed(reg) & bit))
+ return -ETIMEDOUT;
+ return 0;
+ }
+ cpu_relax();
+ }
+ return 0;
+}
+
+static void omap2_mcspi_rx_callback(void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ /* We must disable the DMA RX request */
+ omap2_mcspi_set_dma_req(spi, 1, 0);
+
+ complete(&mcspi_dma->dma_rx_completion);
+}
+
+static void omap2_mcspi_tx_callback(void *data)
+{
+ struct spi_device *spi = data;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ /* We must disable the DMA TX request */
+ omap2_mcspi_set_dma_req(spi, 0, 0);
+
+ complete(&mcspi_dma->dma_tx_completion);
+}
+
+static void omap2_mcspi_tx_dma(struct spi_device *spi,
+ struct spi_transfer *xfer,
+ struct dma_slave_config cfg)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+ unsigned int count;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+ count = xfer->len;
+
+ if (mcspi_dma->dma_tx) {
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+
+ dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = xfer->tx_dma;
+ sg_dma_len(&sg) = xfer->len;
+
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
+ DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_tx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
+ }
+ }
+ dma_async_issue_pending(mcspi_dma->dma_tx);
+ omap2_mcspi_set_dma_req(spi, 0, 1);
+}
+
+static unsigned
+omap2_mcspi_rx_dma(struct spi_device *spi, struct spi_transfer *xfer,
+ struct dma_slave_config cfg,
+ unsigned es)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+ unsigned int count, dma_count;
+ u32 l;
+ int elements = 0;
+ int word_len, element_count;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+ count = xfer->len;
+ dma_count = xfer->len;
+
+ if (mcspi->fifo_depth == 0)
+ dma_count -= es;
+
+ word_len = cs->word_len;
+ l = mcspi_cached_chconf0(spi);
+
+ if (word_len <= 8)
+ element_count = count;
+ else if (word_len <= 16)
+ element_count = count >> 1;
+ else /* word_len <= 32 */
+ element_count = count >> 2;
+
+ if (mcspi_dma->dma_rx) {
+ struct dma_async_tx_descriptor *tx;
+ struct scatterlist sg;
+
+ dmaengine_slave_config(mcspi_dma->dma_rx, &cfg);
+
+ if ((l & OMAP2_MCSPI_CHCONF_TURBO) && mcspi->fifo_depth == 0)
+ dma_count -= es;
+
+ sg_init_table(&sg, 1);
+ sg_dma_address(&sg) = xfer->rx_dma;
+ sg_dma_len(&sg) = dma_count;
+
+ tx = dmaengine_prep_slave_sg(mcspi_dma->dma_rx, &sg, 1,
+ DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT |
+ DMA_CTRL_ACK);
+ if (tx) {
+ tx->callback = omap2_mcspi_rx_callback;
+ tx->callback_param = spi;
+ dmaengine_submit(tx);
+ } else {
+ /* FIXME: fall back to PIO? */
+ }
+ }
+
+ dma_async_issue_pending(mcspi_dma->dma_rx);
+ omap2_mcspi_set_dma_req(spi, 1, 1);
+
+ wait_for_completion(&mcspi_dma->dma_rx_completion);
+ dma_unmap_single(mcspi->dev, xfer->rx_dma, count,
+ DMA_FROM_DEVICE);
+
+ if (mcspi->fifo_depth > 0)
+ return count;
+
+ omap2_mcspi_set_enable(spi, 0);
+
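+ /* Without a FIFO, the DMA above was programmed for one word less
+ * than the transfer (two less in TURBO mode); drain the remainder
+ * through the RX register with the channel disabled, so that no
+ * new words are clocked in meanwhile.
+ */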
+ elements = element_count - 1;
+
+ if (l & OMAP2_MCSPI_CHCONF_TURBO) {
+ elements--;
+
+ if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
+ & OMAP2_MCSPI_CHSTAT_RXS)) {
+ u32 w;
+
+ w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
+ if (word_len <= 8)
+ ((u8 *)xfer->rx_buf)[elements++] = w;
+ else if (word_len <= 16)
+ ((u16 *)xfer->rx_buf)[elements++] = w;
+ else /* word_len <= 32 */
+ ((u32 *)xfer->rx_buf)[elements++] = w;
+ } else {
+ int bytes_per_word = mcspi_bytes_per_word(word_len);
+ dev_err(&spi->dev, "DMA RX penultimate word empty\n");
+ count -= (bytes_per_word << 1);
+ omap2_mcspi_set_enable(spi, 1);
+ return count;
+ }
+ }
+ if (likely(mcspi_read_cs_reg(spi, OMAP2_MCSPI_CHSTAT0)
+ & OMAP2_MCSPI_CHSTAT_RXS)) {
+ u32 w;
+
+ w = mcspi_read_cs_reg(spi, OMAP2_MCSPI_RX0);
+ if (word_len <= 8)
+ ((u8 *)xfer->rx_buf)[elements] = w;
+ else if (word_len <= 16)
+ ((u16 *)xfer->rx_buf)[elements] = w;
+ else /* word_len <= 32 */
+ ((u32 *)xfer->rx_buf)[elements] = w;
+ } else {
+ dev_err(&spi->dev, "DMA RX last word empty\n");
+ count -= mcspi_bytes_per_word(word_len);
+ }
+ omap2_mcspi_set_enable(spi, 1);
+ return count;
+}
+
+static unsigned
+omap2_mcspi_txrx_dma(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi_dma *mcspi_dma;
+ unsigned int count;
+ u32 l;
+ u8 *rx;
+ const u8 *tx;
+ struct dma_slave_config cfg;
+ enum dma_slave_buswidth width;
+ unsigned es;
+ u32 burst;
+ void __iomem *chstat_reg;
+ void __iomem *irqstat_reg;
+ int wait_res;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+ l = mcspi_cached_chconf0(spi);
+
+ if (cs->word_len <= 8) {
+ width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ es = 1;
+ } else if (cs->word_len <= 16) {
+ width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ es = 2;
+ } else {
+ width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ es = 4;
+ }
+
+ count = xfer->len;
+ burst = 1;
+
+ if (mcspi->fifo_depth > 0) {
+ if (count > mcspi->fifo_depth)
+ burst = mcspi->fifo_depth / es;
+ else
+ burst = count / es;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.src_addr = cs->phys + OMAP2_MCSPI_RX0;
+ cfg.dst_addr = cs->phys + OMAP2_MCSPI_TX0;
+ cfg.src_addr_width = width;
+ cfg.dst_addr_width = width;
+ cfg.src_maxburst = burst;
+ cfg.dst_maxburst = burst;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+
+ if (tx != NULL)
+ omap2_mcspi_tx_dma(spi, xfer, cfg);
+
+ if (rx != NULL)
+ count = omap2_mcspi_rx_dma(spi, xfer, cfg, es);
+
+ if (tx != NULL) {
+ wait_for_completion(&mcspi_dma->dma_tx_completion);
+ dma_unmap_single(mcspi->dev, xfer->tx_dma, xfer->len,
+ DMA_TO_DEVICE);
+
+ if (mcspi->fifo_depth > 0) {
+ irqstat_reg = mcspi->base + OMAP2_MCSPI_IRQSTATUS;
+
+ if (mcspi_wait_for_reg_bit(irqstat_reg,
+ OMAP2_MCSPI_IRQSTATUS_EOW) < 0)
+ dev_err(&spi->dev, "EOW timed out\n");
+
+ mcspi_write_reg(mcspi->master, OMAP2_MCSPI_IRQSTATUS,
+ OMAP2_MCSPI_IRQSTATUS_EOW);
+ }
+
+ /* for TX_ONLY mode, be sure all words have shifted out */
+ if (rx == NULL) {
+ chstat_reg = cs->base + OMAP2_MCSPI_CHSTAT0;
+ if (mcspi->fifo_depth > 0) {
+ wait_res = mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXFFE);
+ if (wait_res < 0)
+ dev_err(&spi->dev, "TXFFE timed out\n");
+ } else {
+ wait_res = mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS);
+ if (wait_res < 0)
+ dev_err(&spi->dev, "TXS timed out\n");
+ }
+ if (wait_res >= 0 &&
+ (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_EOT) < 0))
+ dev_err(&spi->dev, "EOT timed out\n");
+ }
+ }
+ return count;
+}
+
+static unsigned
+omap2_mcspi_txrx_pio(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ unsigned int count, c;
+ u32 l;
+ void __iomem *base = cs->base;
+ void __iomem *tx_reg;
+ void __iomem *rx_reg;
+ void __iomem *chstat_reg;
+ int word_len;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ count = xfer->len;
+ c = count;
+ word_len = cs->word_len;
+
+ l = mcspi_cached_chconf0(spi);
+
+ /* We store the pre-calculated register addresses on stack to speed
+ * up the transfer loop. */
+ tx_reg = base + OMAP2_MCSPI_TX0;
+ rx_reg = base + OMAP2_MCSPI_RX0;
+ chstat_reg = base + OMAP2_MCSPI_CHSTAT0;
+
+ if (c < (word_len>>3))
+ return 0;
+
+ if (word_len <= 8) {
+ u8 *rx;
+ const u8 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+
+ do {
+ c -= 1;
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+ dev_vdbg(&spi->dev, "write-%d %02x\n",
+ word_len, *tx);
+ writel_relaxed(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+
+ if (c == 1 && tx == NULL &&
+ (l & OMAP2_MCSPI_CHCONF_TURBO)) {
+ omap2_mcspi_set_enable(spi, 0);
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %02x\n",
+ word_len, *(rx - 1));
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev,
+ "RXS timed out\n");
+ goto out;
+ }
+ c = 0;
+ } else if (c == 0 && tx == NULL) {
+ omap2_mcspi_set_enable(spi, 0);
+ }
+
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %02x\n",
+ word_len, *(rx - 1));
+ }
+ } while (c);
+ } else if (word_len <= 16) {
+ u16 *rx;
+ const u16 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 2;
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+ dev_vdbg(&spi->dev, "write-%d %04x\n",
+ word_len, *tx);
+ writel_relaxed(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+
+ if (c == 2 && tx == NULL &&
+ (l & OMAP2_MCSPI_CHCONF_TURBO)) {
+ omap2_mcspi_set_enable(spi, 0);
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %04x\n",
+ word_len, *(rx - 1));
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev,
+ "RXS timed out\n");
+ goto out;
+ }
+ c = 0;
+ } else if (c == 0 && tx == NULL) {
+ omap2_mcspi_set_enable(spi, 0);
+ }
+
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %04x\n",
+ word_len, *(rx - 1));
+ }
+ } while (c >= 2);
+ } else if (word_len <= 32) {
+ u32 *rx;
+ const u32 *tx;
+
+ rx = xfer->rx_buf;
+ tx = xfer->tx_buf;
+ do {
+ c -= 4;
+ if (tx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ goto out;
+ }
+ dev_vdbg(&spi->dev, "write-%d %08x\n",
+ word_len, *tx);
+ writel_relaxed(*tx++, tx_reg);
+ }
+ if (rx != NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev, "RXS timed out\n");
+ goto out;
+ }
+
+ if (c == 4 && tx == NULL &&
+ (l & OMAP2_MCSPI_CHCONF_TURBO)) {
+ omap2_mcspi_set_enable(spi, 0);
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %08x\n",
+ word_len, *(rx - 1));
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_RXS) < 0) {
+ dev_err(&spi->dev,
+ "RXS timed out\n");
+ goto out;
+ }
+ c = 0;
+ } else if (c == 0 && tx == NULL) {
+ omap2_mcspi_set_enable(spi, 0);
+ }
+
+ *rx++ = readl_relaxed(rx_reg);
+ dev_vdbg(&spi->dev, "read-%d %08x\n",
+ word_len, *(rx - 1));
+ }
+ } while (c >= 4);
+ }
+
+ /* for TX_ONLY mode, be sure all words have shifted out */
+ if (xfer->rx_buf == NULL) {
+ if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_TXS) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ } else if (mcspi_wait_for_reg_bit(chstat_reg,
+ OMAP2_MCSPI_CHSTAT_EOT) < 0)
+ dev_err(&spi->dev, "EOT timed out\n");
+
+ /* disable the channel to purge RX data received during a TX_ONLY
+ * transfer; otherwise this stale data would corrupt the RX_ONLY
+ * transfer that directly follows.
+ */
+ omap2_mcspi_set_enable(spi, 0);
+ }
+out:
+ omap2_mcspi_set_enable(spi, 1);
+ return count - c;
+}
+
+static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
+{
+ u32 div;
+
+ for (div = 0; div < 15; div++)
+ if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
+ return div;
+
+ return 15;
+}
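+/* Example: for speed_hz = 1 MHz this returns 6: 48 MHz >> 5 = 1.5 MHz
+ * is still too fast, and 48 MHz >> 6 = 750 kHz is the first rate not
+ * above the request.
+ */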
+
+/* called only when no transfer is active to this device */
+static int omap2_mcspi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+ struct omap2_mcspi *mcspi;
+ struct spi_master *spi_cntrl;
+ u32 l = 0, clkd = 0, div, extclk = 0, clkg = 0;
+ u8 word_len = spi->bits_per_word;
+ u32 speed_hz = spi->max_speed_hz;
+
+ mcspi = spi_master_get_devdata(spi->master);
+ spi_cntrl = mcspi->master;
+
+ if (t != NULL && t->bits_per_word)
+ word_len = t->bits_per_word;
+
+ cs->word_len = word_len;
+
+ if (t && t->speed_hz)
+ speed_hz = t->speed_hz;
+
+ speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_MAX_FREQ);
+ if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+ clkd = omap2_mcspi_calc_divisor(speed_hz);
+ speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
+ clkg = 0;
+ } else {
+ div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+ speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
+ clkd = (div - 1) & 0xf;
+ extclk = (div - 1) >> 4;
+ clkg = OMAP2_MCSPI_CHCONF_CLKG;
+ }
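+ /* Example: speed_hz = 10 MHz lies above 48 MHz / 4096, so the
+ * one-clock-granularity path is taken: div = 5, giving 9.6 MHz
+ * with clkd = 4 and extclk = 0.
+ */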
+
+ l = mcspi_cached_chconf0(spi);
+
+ /* standard 4-wire master mode: SCK, MOSI/out, MISO/in, nCS
+ * REVISIT: this controller could support SPI_3WIRE mode.
+ */
+ if (mcspi->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+ l &= ~OMAP2_MCSPI_CHCONF_IS;
+ l &= ~OMAP2_MCSPI_CHCONF_DPE1;
+ l |= OMAP2_MCSPI_CHCONF_DPE0;
+ } else {
+ l |= OMAP2_MCSPI_CHCONF_IS;
+ l |= OMAP2_MCSPI_CHCONF_DPE1;
+ l &= ~OMAP2_MCSPI_CHCONF_DPE0;
+ }
+
+ /* wordlength */
+ l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
+ l |= (word_len - 1) << 7;
+
+ /* set chipselect polarity; manage with FORCE */
+ if (!(spi->mode & SPI_CS_HIGH))
+ l |= OMAP2_MCSPI_CHCONF_EPOL; /* active-low; normal */
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_EPOL;
+
+ /* set clock divisor */
+ l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
+ l |= clkd << 2;
+
+ /* set clock granularity */
+ l &= ~OMAP2_MCSPI_CHCONF_CLKG;
+ l |= clkg;
+ if (clkg) {
+ cs->chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
+ cs->chctrl0 |= extclk << 8;
+ mcspi_write_cs_reg(spi, OMAP2_MCSPI_CHCTRL0, cs->chctrl0);
+ }
+
+ /* set SPI mode 0..3 */
+ if (spi->mode & SPI_CPOL)
+ l |= OMAP2_MCSPI_CHCONF_POL;
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_POL;
+ if (spi->mode & SPI_CPHA)
+ l |= OMAP2_MCSPI_CHCONF_PHA;
+ else
+ l &= ~OMAP2_MCSPI_CHCONF_PHA;
+
+ mcspi_write_chconf0(spi, l);
+
+ cs->mode = spi->mode;
+
+ dev_dbg(&spi->dev, "setup: speed %d, sample %s edge, clk %s\n",
+ speed_hz,
+ (spi->mode & SPI_CPHA) ? "trailing" : "leading",
+ (spi->mode & SPI_CPOL) ? "inverted" : "normal");
+
+ return 0;
+}
+
+/*
+ * Note that we currently allow DMA only if we get a channel
+ * for both rx and tx. Otherwise we'll do PIO for both rx and tx.
+ */
+static int omap2_mcspi_request_dma(struct spi_device *spi)
+{
+ struct spi_master *master = spi->master;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+ dma_cap_mask_t mask;
+ unsigned sig;
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi_dma = mcspi->dma_channels + spi->chip_select;
+
+ init_completion(&mcspi_dma->dma_rx_completion);
+ init_completion(&mcspi_dma->dma_tx_completion);
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ sig = mcspi_dma->dma_rx_sync_dev;
+
+ mcspi_dma->dma_rx =
+ dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+ &sig, &master->dev,
+ mcspi_dma->dma_rx_ch_name);
+ if (!mcspi_dma->dma_rx)
+ goto no_dma;
+
+ sig = mcspi_dma->dma_tx_sync_dev;
+ mcspi_dma->dma_tx =
+ dma_request_slave_channel_compat(mask, omap_dma_filter_fn,
+ &sig, &master->dev,
+ mcspi_dma->dma_tx_ch_name);
+
+ if (!mcspi_dma->dma_tx) {
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
+ goto no_dma;
+ }
+
+ return 0;
+
+no_dma:
+ dev_warn(&spi->dev, "not using DMA for McSPI\n");
+ return -EAGAIN;
+}
+
+static int omap2_mcspi_setup(struct spi_device *spi)
+{
+ int ret;
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_dma *mcspi_dma;
+ struct omap2_mcspi_cs *cs = spi->controller_state;
+
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ if (!cs) {
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ cs->base = mcspi->base + spi->chip_select * 0x14;
+ cs->phys = mcspi->phys + spi->chip_select * 0x14;
+ cs->mode = 0;
+ cs->chconf0 = 0;
+ cs->chctrl0 = 0;
+ spi->controller_state = cs;
+ /* Link this to context save list */
+ list_add_tail(&cs->node, &ctx->cs);
+ }
+
+ if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
+ ret = omap2_mcspi_request_dma(spi);
+ if (ret < 0 && ret != -EAGAIN)
+ return ret;
+ }
+
+ ret = pm_runtime_get_sync(mcspi->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = omap2_mcspi_setup_transfer(spi, NULL);
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
+
+ return ret;
+}
+
+static void omap2_mcspi_cleanup(struct spi_device *spi)
+{
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+ struct omap2_mcspi_cs *cs;
+
+ mcspi = spi_master_get_devdata(spi->master);
+
+ if (spi->controller_state) {
+ /* Unlink controller state from context save list */
+ cs = spi->controller_state;
+ list_del(&cs->node);
+
+ kfree(cs);
+ }
+
+ if (spi->chip_select < spi->master->num_chipselect) {
+ mcspi_dma = &mcspi->dma_channels[spi->chip_select];
+
+ if (mcspi_dma->dma_rx) {
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
+ }
+ if (mcspi_dma->dma_tx) {
+ dma_release_channel(mcspi_dma->dma_tx);
+ mcspi_dma->dma_tx = NULL;
+ }
+ }
+}
+
+static void omap2_mcspi_work(struct omap2_mcspi *mcspi, struct spi_message *m)
+{
+
+ /* We only enable one channel at a time -- the one whose message is
+ * at the head of the queue -- although this controller would gladly
+ * arbitrate among multiple channels. This corresponds to "single
+ * channel" master mode. As a side effect, we need to manage the
+ * chipselect with the FORCE bit ... CS != channel enable.
+ */
+
+ struct spi_device *spi;
+ struct spi_transfer *t = NULL;
+ struct spi_master *master;
+ struct omap2_mcspi_dma *mcspi_dma;
+ int cs_active = 0;
+ struct omap2_mcspi_cs *cs;
+ struct omap2_mcspi_device_config *cd;
+ int par_override = 0;
+ int status = 0;
+ u32 chconf;
+
+ spi = m->spi;
+ master = spi->master;
+ mcspi_dma = mcspi->dma_channels + spi->chip_select;
+ cs = spi->controller_state;
+ cd = spi->controller_data;
+
+ /*
+ * The slave driver could have changed spi->mode in which case
+ * it will be different from cs->mode (the current hardware setup).
+ * If so, set par_override (even though it's not a parity issue) so
+ * omap2_mcspi_setup_transfer will be called to configure the hardware
+ * with the correct mode on the first iteration of the loop below.
+ */
+ if (spi->mode != cs->mode)
+ par_override = 1;
+
+ omap2_mcspi_set_enable(spi, 0);
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (t->tx_buf == NULL && t->rx_buf == NULL && t->len) {
+ status = -EINVAL;
+ break;
+ }
+ if (par_override ||
+ (t->speed_hz != spi->max_speed_hz) ||
+ (t->bits_per_word != spi->bits_per_word)) {
+ par_override = 1;
+ status = omap2_mcspi_setup_transfer(spi, t);
+ if (status < 0)
+ break;
+ if (t->speed_hz == spi->max_speed_hz &&
+ t->bits_per_word == spi->bits_per_word)
+ par_override = 0;
+ }
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf &= ~OMAP2_MCSPI_MODULCTRL_SINGLE;
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+
+ if (!cs_active) {
+ omap2_mcspi_force_cs(spi, 1);
+ cs_active = 1;
+ }
+
+ chconf = mcspi_cached_chconf0(spi);
+ chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+ chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
+
+ if (t->tx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_RX_ONLY;
+ else if (t->rx_buf == NULL)
+ chconf |= OMAP2_MCSPI_CHCONF_TRM_TX_ONLY;
+
+ if (cd && cd->turbo_mode && t->tx_buf == NULL) {
+ /* Turbo mode is for more than one word */
+ if (t->len > ((cs->word_len + 7) >> 3))
+ chconf |= OMAP2_MCSPI_CHCONF_TURBO;
+ }
+
+ mcspi_write_chconf0(spi, chconf);
+
+ if (t->len) {
+ unsigned count;
+
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+ (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
+ omap2_mcspi_set_fifo(spi, t, 1);
+
+ omap2_mcspi_set_enable(spi, 1);
+
+ /* RX_ONLY mode needs dummy data in TX reg */
+ if (t->tx_buf == NULL)
+ writel_relaxed(0, cs->base
+ + OMAP2_MCSPI_TX0);
+
+ if ((mcspi_dma->dma_rx && mcspi_dma->dma_tx) &&
+ (m->is_dma_mapped || t->len >= DMA_MIN_BYTES))
+ count = omap2_mcspi_txrx_dma(spi, t);
+ else
+ count = omap2_mcspi_txrx_pio(spi, t);
+ m->actual_length += count;
+
+ if (count != t->len) {
+ status = -EIO;
+ break;
+ }
+ }
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ /* ignore the "leave it on after last xfer" hint */
+ if (t->cs_change) {
+ omap2_mcspi_force_cs(spi, 0);
+ cs_active = 0;
+ }
+
+ omap2_mcspi_set_enable(spi, 0);
+
+ if (mcspi->fifo_depth > 0)
+ omap2_mcspi_set_fifo(spi, t, 0);
+ }
+ /* Restore defaults if they were overridden */
+ if (par_override) {
+ par_override = 0;
+ status = omap2_mcspi_setup_transfer(spi, NULL);
+ }
+
+ if (cs_active)
+ omap2_mcspi_force_cs(spi, 0);
+
+ if (cd && cd->cs_per_word) {
+ chconf = mcspi->ctx.modulctrl;
+ chconf |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+ mcspi_write_reg(master, OMAP2_MCSPI_MODULCTRL, chconf);
+ mcspi->ctx.modulctrl =
+ mcspi_read_cs_reg(spi, OMAP2_MCSPI_MODULCTRL);
+ }
+
+ omap2_mcspi_set_enable(spi, 0);
+
+ if (mcspi->fifo_depth > 0 && t)
+ omap2_mcspi_set_fifo(spi, t, 0);
+
+ m->status = status;
+}
+
+static int omap2_mcspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct spi_device *spi;
+ struct omap2_mcspi *mcspi;
+ struct omap2_mcspi_dma *mcspi_dma;
+ struct spi_transfer *t;
+ int status;
+
+ spi = m->spi;
+ mcspi = spi_master_get_devdata(master);
+ mcspi_dma = mcspi->dma_channels + spi->chip_select;
+ m->actual_length = 0;
+ m->status = 0;
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ const void *tx_buf = t->tx_buf;
+ void *rx_buf = t->rx_buf;
+ unsigned len = t->len;
+
+ if (len && !(rx_buf || tx_buf)) {
+ dev_dbg(mcspi->dev, "transfer: %d Hz, %d %s%s, %d bpw\n",
+ t->speed_hz,
+ len,
+ tx_buf ? "tx" : "",
+ rx_buf ? "rx" : "",
+ t->bits_per_word);
+ status = -EINVAL;
+ goto out;
+ }
+
+ if (m->is_dma_mapped || len < DMA_MIN_BYTES)
+ continue;
+
+ if (mcspi_dma->dma_tx && tx_buf != NULL) {
+ t->tx_dma = dma_map_single(mcspi->dev, (void *) tx_buf,
+ len, DMA_TO_DEVICE);
+ if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
+ dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
+ 'T', len);
+ status = -EINVAL;
+ goto out;
+ }
+ }
+ if (mcspi_dma->dma_rx && rx_buf != NULL) {
+ t->rx_dma = dma_map_single(mcspi->dev, rx_buf, t->len,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(mcspi->dev, t->rx_dma)) {
+ dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
+ 'R', len);
+ if (tx_buf != NULL)
+ dma_unmap_single(mcspi->dev, t->tx_dma,
+ len, DMA_TO_DEVICE);
+ status = -EINVAL;
+ goto out;
+ }
+ }
+ }
+
+ omap2_mcspi_work(mcspi, m);
+ /* spi_finalize_current_message() changes the status inside the
+ * spi_message, save the status here. */
+ status = m->status;
+out:
+ spi_finalize_current_message(master);
+ return status;
+}
+
+static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
+{
+ struct spi_master *master = mcspi->master;
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ int ret = 0;
+
+ ret = pm_runtime_get_sync(mcspi->dev);
+ if (ret < 0)
+ return ret;
+
+ mcspi_write_reg(master, OMAP2_MCSPI_WAKEUPENABLE,
+ OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+ ctx->wakeupenable = OMAP2_MCSPI_WAKEUPENABLE_WKEN;
+
+ omap2_mcspi_set_master_mode(master);
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
+ return 0;
+}
+
+static int omap_mcspi_runtime_resume(struct device *dev)
+{
+ struct omap2_mcspi *mcspi;
+ struct spi_master *master;
+
+ master = dev_get_drvdata(dev);
+ mcspi = spi_master_get_devdata(master);
+ omap2_mcspi_restore_ctx(mcspi);
+
+ return 0;
+}
+
+static struct omap2_mcspi_platform_config omap2_pdata = {
+ .regs_offset = 0,
+};
+
+static struct omap2_mcspi_platform_config omap4_pdata = {
+ .regs_offset = OMAP4_MCSPI_REG_OFFSET,
+};
+
+static const struct of_device_id omap_mcspi_of_match[] = {
+ {
+ .compatible = "ti,omap2-mcspi",
+ .data = &omap2_pdata,
+ },
+ {
+ .compatible = "ti,omap4-mcspi",
+ .data = &omap4_pdata,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
+
+static int omap2_mcspi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ const struct omap2_mcspi_platform_config *pdata;
+ struct omap2_mcspi *mcspi;
+ struct resource *r;
+ int status = 0, i;
+ u32 regs_offset = 0;
+ static int bus_num = 1;
+ struct device_node *node = pdev->dev.of_node;
+ const struct of_device_id *match;
+
+ master = spi_alloc_master(&pdev->dev, sizeof *mcspi);
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->setup = omap2_mcspi_setup;
+ master->auto_runtime_pm = true;
+ master->transfer_one_message = omap2_mcspi_transfer_one_message;
+ master->cleanup = omap2_mcspi_cleanup;
+ master->dev.of_node = node;
+ master->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+ master->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
+
+ platform_set_drvdata(pdev, master);
+
+ mcspi = spi_master_get_devdata(master);
+ mcspi->master = master;
+
+ match = of_match_device(omap_mcspi_of_match, &pdev->dev);
+ if (match) {
+ u32 num_cs = 1; /* default number of chipselect */
+ pdata = match->data;
+
+ of_property_read_u32(node, "ti,spi-num-cs", &num_cs);
+ master->num_chipselect = num_cs;
+ master->bus_num = bus_num++;
+ if (of_get_property(node, "ti,pindir-d0-out-d1-in", NULL))
+ mcspi->pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
+ } else {
+ pdata = dev_get_platdata(&pdev->dev);
+ master->num_chipselect = pdata->num_cs;
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+ mcspi->pin_dir = pdata->pin_dir;
+ }
+ regs_offset = pdata->regs_offset;
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ status = -ENODEV;
+ goto free_master;
+ }
+
+ r->start += regs_offset;
+ r->end += regs_offset;
+ mcspi->phys = r->start;
+
+ mcspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(mcspi->base)) {
+ status = PTR_ERR(mcspi->base);
+ goto free_master;
+ }
+
+ mcspi->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&mcspi->ctx.cs);
+
+ mcspi->dma_channels = devm_kcalloc(&pdev->dev, master->num_chipselect,
+ sizeof(struct omap2_mcspi_dma),
+ GFP_KERNEL);
+ if (mcspi->dma_channels == NULL) {
+ status = -ENOMEM;
+ goto free_master;
+ }
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ char *dma_rx_ch_name = mcspi->dma_channels[i].dma_rx_ch_name;
+ char *dma_tx_ch_name = mcspi->dma_channels[i].dma_tx_ch_name;
+ struct resource *dma_res;
+
+ sprintf(dma_rx_ch_name, "rx%d", i);
+ if (!pdev->dev.of_node) {
+ dma_res =
+ platform_get_resource_byname(pdev,
+ IORESOURCE_DMA,
+ dma_rx_ch_name);
+ if (!dma_res) {
+ dev_dbg(&pdev->dev,
+ "cannot get DMA RX channel\n");
+ status = -ENODEV;
+ break;
+ }
+
+ mcspi->dma_channels[i].dma_rx_sync_dev =
+ dma_res->start;
+ }
+ sprintf(dma_tx_ch_name, "tx%d", i);
+ if (!pdev->dev.of_node) {
+ dma_res =
+ platform_get_resource_byname(pdev,
+ IORESOURCE_DMA,
+ dma_tx_ch_name);
+ if (!dma_res) {
+ dev_dbg(&pdev->dev,
+ "cannot get DMA TX channel\n");
+ status = -ENODEV;
+ break;
+ }
+
+ mcspi->dma_channels[i].dma_tx_sync_dev =
+ dma_res->start;
+ }
+ }
+
+ if (status < 0)
+ goto free_master;
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_enable(&pdev->dev);
+
+ status = omap2_mcspi_master_setup(mcspi);
+ if (status < 0)
+ goto disable_pm;
+
+ status = devm_spi_register_master(&pdev->dev, master);
+ if (status < 0)
+ goto disable_pm;
+
+ return status;
+
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
+free_master:
+ spi_master_put(master);
+ return status;
+}
+
+static int omap2_mcspi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+
+ pm_runtime_put_sync(mcspi->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:omap2_mcspi");
+
+#ifdef CONFIG_SUSPEND
+/*
+ * When the SPI controller wakes up from off-mode, CS is in the active
+ * state. If it was inactive when the driver was suspended, force it
+ * back to the inactive state at wake-up.
+ */
+static int omap2_mcspi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_regs *ctx = &mcspi->ctx;
+ struct omap2_mcspi_cs *cs;
+
+ pm_runtime_get_sync(mcspi->dev);
+ list_for_each_entry(cs, &ctx->cs, node) {
+ if ((cs->chconf0 & OMAP2_MCSPI_CHCONF_FORCE) == 0) {
+ /*
+ * We need to toggle the CS state for the OMAP to take
+ * this change into account.
+ */
+ cs->chconf0 |= OMAP2_MCSPI_CHCONF_FORCE;
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+ cs->chconf0 &= ~OMAP2_MCSPI_CHCONF_FORCE;
+ writel_relaxed(cs->chconf0, cs->base + OMAP2_MCSPI_CHCONF0);
+ }
+ }
+ pm_runtime_mark_last_busy(mcspi->dev);
+ pm_runtime_put_autosuspend(mcspi->dev);
+ return 0;
+}
+#else
+#define omap2_mcspi_resume NULL
+#endif
+
+static const struct dev_pm_ops omap2_mcspi_pm_ops = {
+ .resume = omap2_mcspi_resume,
+ .runtime_resume = omap_mcspi_runtime_resume,
+};
+
+static struct platform_driver omap2_mcspi_driver = {
+ .driver = {
+ .name = "omap2_mcspi",
+ .pm = &omap2_mcspi_pm_ops,
+ .of_match_table = omap_mcspi_of_match,
+ },
+ .probe = omap2_mcspi_probe,
+ .remove = omap2_mcspi_remove,
+};
+
+module_platform_driver(omap2_mcspi_driver);
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
new file mode 100644
index 000000000..ff97cabda
--- /dev/null
+++ b/drivers/spi/spi-orion.c
@@ -0,0 +1,572 @@
+/*
+ * Marvell Orion SPI controller driver
+ *
+ * Author: Shadi Ammouri <shadi@marvell.com>
+ * Copyright (C) 2007-2008 Marvell Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/clk.h>
+#include <linux/sizes.h>
+#include <asm/unaligned.h>
+
+#define DRIVER_NAME "orion_spi"
+
+/* Runtime PM autosuspend timeout: PM is fairly light on this driver */
+#define SPI_AUTOSUSPEND_TIMEOUT 200
+
+/* Some SoCs using this driver support up to 8 chip selects.
+ * It is up to the implementer to only use the chip selects
+ * that are available.
+ */
+#define ORION_NUM_CHIPSELECTS 8
+
+#define ORION_SPI_WAIT_RDY_MAX_LOOP 2000 /* in usec */
+
+#define ORION_SPI_IF_CTRL_REG 0x00
+#define ORION_SPI_IF_CONFIG_REG 0x04
+#define ORION_SPI_DATA_OUT_REG 0x08
+#define ORION_SPI_DATA_IN_REG 0x0c
+#define ORION_SPI_INT_CAUSE_REG 0x10
+
+#define ORION_SPI_MODE_CPOL (1 << 11)
+#define ORION_SPI_MODE_CPHA (1 << 12)
+#define ORION_SPI_IF_8_16_BIT_MODE (1 << 5)
+#define ORION_SPI_CLK_PRESCALE_MASK 0x1F
+#define ARMADA_SPI_CLK_PRESCALE_MASK 0xDF
+#define ORION_SPI_MODE_MASK (ORION_SPI_MODE_CPOL | \
+ ORION_SPI_MODE_CPHA)
+#define ORION_SPI_CS_MASK 0x1C
+#define ORION_SPI_CS_SHIFT 2
+#define ORION_SPI_CS(cs) ((cs << ORION_SPI_CS_SHIFT) & \
+ ORION_SPI_CS_MASK)
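+/* e.g. ORION_SPI_CS(3) = (3 << 2) & 0x1c = 0x0c */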
+
+enum orion_spi_type {
+ ORION_SPI,
+ ARMADA_SPI,
+};
+
+struct orion_spi_dev {
+ enum orion_spi_type typ;
+ /*
+ * min_divisor and max_hz should be mutually exclusive; the only
+ * reason to have both is to handle the armada-370-spi case with
+ * an old device tree
+ */
+ unsigned long max_hz;
+ unsigned int min_divisor;
+ unsigned int max_divisor;
+ u32 prescale_mask;
+};
+
+struct orion_spi {
+ struct spi_master *master;
+ void __iomem *base;
+ struct clk *clk;
+ const struct orion_spi_dev *devdata;
+};
+
+static inline void __iomem *spi_reg(struct orion_spi *orion_spi, u32 reg)
+{
+ return orion_spi->base + reg;
+}
+
+static inline void
+orion_spi_setbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
+{
+ void __iomem *reg_addr = spi_reg(orion_spi, reg);
+ u32 val;
+
+ val = readl(reg_addr);
+ val |= mask;
+ writel(val, reg_addr);
+}
+
+static inline void
+orion_spi_clrbits(struct orion_spi *orion_spi, u32 reg, u32 mask)
+{
+ void __iomem *reg_addr = spi_reg(orion_spi, reg);
+ u32 val;
+
+ val = readl(reg_addr);
+ val &= ~mask;
+ writel(val, reg_addr);
+}
+
+static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
+{
+ u32 tclk_hz;
+ u32 rate;
+ u32 prescale;
+ u32 reg;
+ struct orion_spi *orion_spi;
+ const struct orion_spi_dev *devdata;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+ devdata = orion_spi->devdata;
+
+ tclk_hz = clk_get_rate(orion_spi->clk);
+
+ if (devdata->typ == ARMADA_SPI) {
+ unsigned int clk, spr, sppr, sppr2, err;
+ unsigned int best_spr, best_sppr, best_err;
+
+ best_err = speed;
+ best_spr = 0;
+ best_sppr = 0;
+
+ /* Iterate over the valid range looking for best fit */
+ for (sppr = 0; sppr < 8; sppr++) {
+ sppr2 = 0x1 << sppr;
+
+ spr = tclk_hz / sppr2;
+ spr = DIV_ROUND_UP(spr, speed);
+ if ((spr == 0) || (spr > 15))
+ continue;
+
+ clk = tclk_hz / (spr * sppr2);
+ err = speed - clk;
+
+ if (err < best_err) {
+ best_spr = spr;
+ best_sppr = sppr;
+ best_err = err;
+ }
+ }
+
+ if ((best_sppr == 0) && (best_spr == 0))
+ return -EINVAL;
+
+ prescale = ((best_sppr & 0x6) << 5) |
+ ((best_sppr & 0x1) << 4) | best_spr;
+ } else {
+ /*
+ * the supported divisors are the even values 4, 6, 8 ... 30;
+ * round up, since we look for an equal or lower speed
+ */
+ rate = DIV_ROUND_UP(tclk_hz, speed);
+ rate = roundup(rate, 2);
+
+ /* check if requested speed is too small */
+ if (rate > 30)
+ return -EINVAL;
+
+ if (rate < 4)
+ rate = 4;
+
+ /* Convert the rate to SPI clock divisor value. */
+ prescale = 0x10 + rate/2;
+ }
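+ /* Example: tclk = 166 MHz, speed = 20 MHz: rate = roundup(9, 2) = 10,
+ * so prescale = 0x10 + 5 = 0x15 and the bus runs at 16.6 MHz.
+ */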
+
+ reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+ reg = ((reg & ~devdata->prescale_mask) | prescale);
+ writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+
+ return 0;
+}
+
+static void
+orion_spi_mode_set(struct spi_device *spi)
+{
+ u32 reg;
+ struct orion_spi *orion_spi;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ reg = readl(spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+ reg &= ~ORION_SPI_MODE_MASK;
+ if (spi->mode & SPI_CPOL)
+ reg |= ORION_SPI_MODE_CPOL;
+ if (spi->mode & SPI_CPHA)
+ reg |= ORION_SPI_MODE_CPHA;
+ writel(reg, spi_reg(orion_spi, ORION_SPI_IF_CONFIG_REG));
+}
+
+/*
+ * called only when no transfer is active on the bus
+ */
+static int
+orion_spi_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct orion_spi *orion_spi;
+ unsigned int speed = spi->max_speed_hz;
+ unsigned int bits_per_word = spi->bits_per_word;
+ int rc;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ if ((t != NULL) && t->speed_hz)
+ speed = t->speed_hz;
+
+ if ((t != NULL) && t->bits_per_word)
+ bits_per_word = t->bits_per_word;
+
+ orion_spi_mode_set(spi);
+
+ rc = orion_spi_baudrate_set(spi, speed);
+ if (rc)
+ return rc;
+
+ if (bits_per_word == 16)
+ orion_spi_setbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+ else
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CONFIG_REG,
+ ORION_SPI_IF_8_16_BIT_MODE);
+
+ return 0;
+}
+
+static void orion_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct orion_spi *orion_spi;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, ORION_SPI_CS_MASK);
+ orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG,
+ ORION_SPI_CS(spi->chip_select));
+
+ /* Chip select logic is inverted from spi_set_cs */
+ if (!enable)
+ orion_spi_setbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
+ else
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
+}
+
+static inline int orion_spi_wait_till_ready(struct orion_spi *orion_spi)
+{
+ int i;
+
+ for (i = 0; i < ORION_SPI_WAIT_RDY_MAX_LOOP; i++) {
+ if (readl(spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG)))
+ return 1;
+
+ udelay(1);
+ }
+
+ return -1;
+}
+
+static inline int
+orion_spi_write_read_8bit(struct spi_device *spi,
+ const u8 **tx_buf, u8 **rx_buf)
+{
+ void __iomem *tx_reg, *rx_reg, *int_reg;
+ struct orion_spi *orion_spi;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+ tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
+ rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
+ int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);
+
+ /* clear the interrupt cause register */
+ writel(0x0, int_reg);
+
+ if (tx_buf && *tx_buf)
+ writel(*(*tx_buf)++, tx_reg);
+ else
+ writel(0, tx_reg);
+
+ if (orion_spi_wait_till_ready(orion_spi) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ return -1;
+ }
+
+ if (rx_buf && *rx_buf)
+ *(*rx_buf)++ = readl(rx_reg);
+
+ return 1;
+}
+
+static inline int
+orion_spi_write_read_16bit(struct spi_device *spi,
+ const u16 **tx_buf, u16 **rx_buf)
+{
+ void __iomem *tx_reg, *rx_reg, *int_reg;
+ struct orion_spi *orion_spi;
+
+ orion_spi = spi_master_get_devdata(spi->master);
+ tx_reg = spi_reg(orion_spi, ORION_SPI_DATA_OUT_REG);
+ rx_reg = spi_reg(orion_spi, ORION_SPI_DATA_IN_REG);
+ int_reg = spi_reg(orion_spi, ORION_SPI_INT_CAUSE_REG);
+
+ /* clear the interrupt cause register */
+ writel(0x0, int_reg);
+
+ if (tx_buf && *tx_buf)
+ writel(__cpu_to_le16(get_unaligned((*tx_buf)++)), tx_reg);
+ else
+ writel(0, tx_reg);
+
+ if (orion_spi_wait_till_ready(orion_spi) < 0) {
+ dev_err(&spi->dev, "TXS timed out\n");
+ return -1;
+ }
+
+ if (rx_buf && *rx_buf)
+ put_unaligned(__le16_to_cpu(readl(rx_reg)), (*rx_buf)++);
+
+ return 1;
+}
+
+static unsigned int
+orion_spi_write_read(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ unsigned int count;
+ int word_len;
+
+ word_len = spi->bits_per_word;
+ count = xfer->len;
+
+ if (word_len == 8) {
+ const u8 *tx = xfer->tx_buf;
+ u8 *rx = xfer->rx_buf;
+
+ do {
+ if (orion_spi_write_read_8bit(spi, &tx, &rx) < 0)
+ goto out;
+ count--;
+ } while (count);
+ } else if (word_len == 16) {
+ const u16 *tx = xfer->tx_buf;
+ u16 *rx = xfer->rx_buf;
+
+ do {
+ if (orion_spi_write_read_16bit(spi, &tx, &rx) < 0)
+ goto out;
+ count -= 2;
+ } while (count);
+ }
+
+out:
+ return xfer->len - count;
+}
+
+static int orion_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ int status = 0;
+
+ status = orion_spi_setup_transfer(spi, t);
+ if (status < 0)
+ return status;
+
+ if (t->len)
+ orion_spi_write_read(spi, t);
+
+ return status;
+}
+
+static int orion_spi_setup(struct spi_device *spi)
+{
+ return orion_spi_setup_transfer(spi, NULL);
+}
+
+static int orion_spi_reset(struct orion_spi *orion_spi)
+{
+ /* Ensure that the CS is deasserted */
+ orion_spi_clrbits(orion_spi, ORION_SPI_IF_CTRL_REG, 0x1);
+ return 0;
+}
+
+static const struct orion_spi_dev orion_spi_dev_data = {
+ .typ = ORION_SPI,
+ .min_divisor = 4,
+ .max_divisor = 30,
+ .prescale_mask = ORION_SPI_CLK_PRESCALE_MASK,
+};
+
+static const struct orion_spi_dev armada_spi_dev_data = {
+ .typ = ARMADA_SPI,
+ .min_divisor = 4,
+ .max_divisor = 1920,
+ .max_hz = 50000000,
+ .prescale_mask = ARMADA_SPI_CLK_PRESCALE_MASK,
+};
+
+static const struct of_device_id orion_spi_of_match_table[] = {
+ { .compatible = "marvell,orion-spi", .data = &orion_spi_dev_data, },
+ { .compatible = "marvell,armada-370-spi", .data = &armada_spi_dev_data, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, orion_spi_of_match_table);
+
+static int orion_spi_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *of_id;
+ const struct orion_spi_dev *devdata;
+ struct spi_master *master;
+ struct orion_spi *spi;
+ struct resource *r;
+ unsigned long tclk_hz;
+ int status = 0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi));
+ if (master == NULL) {
+ dev_dbg(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ if (pdev->id != -1)
+ master->bus_num = pdev->id;
+ if (pdev->dev.of_node) {
+ u32 cell_index;
+
+ if (!of_property_read_u32(pdev->dev.of_node, "cell-index",
+ &cell_index))
+ master->bus_num = cell_index;
+ }
+
+ /* we support all four SPI modes (CPOL/CPHA), and no other options */
+ master->mode_bits = SPI_CPHA | SPI_CPOL;
+ master->set_cs = orion_spi_set_cs;
+ master->transfer_one = orion_spi_transfer_one;
+ master->num_chipselect = ORION_NUM_CHIPSELECTS;
+ master->setup = orion_spi_setup;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->auto_runtime_pm = true;
+
+ platform_set_drvdata(pdev, master);
+
+ spi = spi_master_get_devdata(master);
+ spi->master = master;
+
+ of_id = of_match_device(orion_spi_of_match_table, &pdev->dev);
+ devdata = (of_id) ? of_id->data : &orion_spi_dev_data;
+ spi->devdata = devdata;
+
+ spi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(spi->clk)) {
+ status = PTR_ERR(spi->clk);
+ goto out;
+ }
+
+ status = clk_prepare_enable(spi->clk);
+ if (status)
+ goto out;
+
+ tclk_hz = clk_get_rate(spi->clk);
+
+ /*
+	 * With an old device tree, armada-370-spi could also be used
+	 * on Armada XP, but for that SoC the maximum frequency is
+	 * 50MHz instead of tclk/4. On Armada 370, tclk cannot be
+	 * higher than 200MHz. So, to handle both SoCs, we take the
+	 * minimum of 50MHz and tclk/4.
+ */
+ if (of_device_is_compatible(pdev->dev.of_node,
+ "marvell,armada-370-spi"))
+ master->max_speed_hz = min(devdata->max_hz,
+ DIV_ROUND_UP(tclk_hz, devdata->min_divisor));
+ else
+ master->max_speed_hz =
+ DIV_ROUND_UP(tclk_hz, devdata->min_divisor);
+ master->min_speed_hz = DIV_ROUND_UP(tclk_hz, devdata->max_divisor);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(spi->base)) {
+ status = PTR_ERR(spi->base);
+ goto out_rel_clk;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, SPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_enable(&pdev->dev);
+
+ status = orion_spi_reset(spi);
+ if (status < 0)
+ goto out_rel_pm;
+
+ pm_runtime_mark_last_busy(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ status = spi_register_master(master);
+ if (status < 0)
+ goto out_rel_pm;
+
+ return status;
+
+out_rel_pm:
+ pm_runtime_disable(&pdev->dev);
+out_rel_clk:
+ clk_disable_unprepare(spi->clk);
+out:
+ spi_master_put(master);
+ return status;
+}
+
+static int orion_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct orion_spi *spi = spi_master_get_devdata(master);
+
+ pm_runtime_get_sync(&pdev->dev);
+ clk_disable_unprepare(spi->clk);
+
+ spi_unregister_master(master);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:" DRIVER_NAME);
+
+#ifdef CONFIG_PM
+static int orion_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct orion_spi *spi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(spi->clk);
+ return 0;
+}
+
+static int orion_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct orion_spi *spi = spi_master_get_devdata(master);
+
+ return clk_prepare_enable(spi->clk);
+}
+#endif
+
+static const struct dev_pm_ops orion_spi_pm_ops = {
+ SET_RUNTIME_PM_OPS(orion_spi_runtime_suspend,
+ orion_spi_runtime_resume,
+ NULL)
+};
+
+static struct platform_driver orion_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &orion_spi_pm_ops,
+ .of_match_table = of_match_ptr(orion_spi_of_match_table),
+ },
+ .probe = orion_spi_probe,
+ .remove = orion_spi_remove,
+};
+
+module_platform_driver(orion_spi_driver);
+
+MODULE_DESCRIPTION("Orion SPI driver");
+MODULE_AUTHOR("Shadi Ammouri <shadi@marvell.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
new file mode 100644
index 000000000..94af80676
--- /dev/null
+++ b/drivers/spi/spi-pl022.c
@@ -0,0 +1,2492 @@
+/*
+ * A driver for the ARM PL022 PrimeCell SSP/SPI bus master.
+ *
+ * Copyright (C) 2008-2012 ST-Ericsson AB
+ * Copyright (C) 2006 STMicroelectronics Pvt. Ltd.
+ *
+ * Author: Linus Walleij <linus.walleij@stericsson.com>
+ *
+ * Initial version inspired by:
+ * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c
+ * Initial adoption to PL022 by:
+ * Sachin Verma <sachin.verma@st.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/amba/bus.h>
+#include <linux/amba/pl022.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/scatterlist.h>
+#include <linux/pm_runtime.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
+
+/*
+ * This macro is used to define some register default values.
+ * reg is masked with mask, the OR:ed with an (again masked)
+ * val shifted sb steps to the left.
+ */
+#define SSP_WRITE_BITS(reg, val, mask, sb) \
+ ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask))))
+
+/*
+ * This macro is also used to define some default values.
+ * It will just shift val by sb steps to the left and mask
+ * the result with mask.
+ */
+#define GEN_MASK_BITS(val, mask, sb) \
+	(((val)<<(sb)) & (mask))
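+
+/*
+ * As a minimal illustration (the values here are examples, not the
+ * driver defaults):
+ *
+ *	SSP_WRITE_BITS(cr0, SSP_DATA_BITS_8, SSP_CR0_MASK_DSS, 0);
+ *
+ * clears the DSS field of cr0 and ORs in the 8-bit data size, while
+ *
+ *	GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8)
+ *
+ * evaluates to the default clock rate shifted into the SCR field.
+ */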
+
+#define DRIVE_TX 0
+#define DO_NOT_DRIVE_TX 1
+
+#define DO_NOT_QUEUE_DMA 0
+#define QUEUE_DMA 1
+
+#define RX_TRANSFER 1
+#define TX_TRANSFER 2
+
+/*
+ * Macros to access SSP Registers with their offsets
+ */
+#define SSP_CR0(r) (r + 0x000)
+#define SSP_CR1(r) (r + 0x004)
+#define SSP_DR(r) (r + 0x008)
+#define SSP_SR(r) (r + 0x00C)
+#define SSP_CPSR(r) (r + 0x010)
+#define SSP_IMSC(r) (r + 0x014)
+#define SSP_RIS(r) (r + 0x018)
+#define SSP_MIS(r) (r + 0x01C)
+#define SSP_ICR(r) (r + 0x020)
+#define SSP_DMACR(r) (r + 0x024)
+#define SSP_CSR(r) (r + 0x030) /* vendor extension */
+#define SSP_ITCR(r) (r + 0x080)
+#define SSP_ITIP(r) (r + 0x084)
+#define SSP_ITOP(r) (r + 0x088)
+#define SSP_TDR(r) (r + 0x08C)
+
+#define SSP_PID0(r) (r + 0xFE0)
+#define SSP_PID1(r) (r + 0xFE4)
+#define SSP_PID2(r) (r + 0xFE8)
+#define SSP_PID3(r) (r + 0xFEC)
+
+#define SSP_CID0(r) (r + 0xFF0)
+#define SSP_CID1(r) (r + 0xFF4)
+#define SSP_CID2(r) (r + 0xFF8)
+#define SSP_CID3(r) (r + 0xFFC)
+
+/*
+ * SSP Control Register 0 - SSP_CR0
+ */
+#define SSP_CR0_MASK_DSS (0x0FUL << 0)
+#define SSP_CR0_MASK_FRF (0x3UL << 4)
+#define SSP_CR0_MASK_SPO (0x1UL << 6)
+#define SSP_CR0_MASK_SPH (0x1UL << 7)
+#define SSP_CR0_MASK_SCR (0xFFUL << 8)
+
+/*
+ * The ST version of this block moves some bits
+ * in SSP_CR0 and extends it to 32 bits
+ */
+#define SSP_CR0_MASK_DSS_ST (0x1FUL << 0)
+#define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5)
+#define SSP_CR0_MASK_CSS_ST (0x1FUL << 16)
+#define SSP_CR0_MASK_FRF_ST (0x3UL << 21)
+
+/*
+ * SSP Control Register 1 - SSP_CR1
+ */
+#define SSP_CR1_MASK_LBM (0x1UL << 0)
+#define SSP_CR1_MASK_SSE (0x1UL << 1)
+#define SSP_CR1_MASK_MS (0x1UL << 2)
+#define SSP_CR1_MASK_SOD (0x1UL << 3)
+
+/*
+ * The ST version of this block adds some bits
+ * in SSP_CR1
+ */
+#define SSP_CR1_MASK_RENDN_ST (0x1UL << 4)
+#define SSP_CR1_MASK_TENDN_ST (0x1UL << 5)
+#define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6)
+#define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7)
+#define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10)
+/* This one is only in the PL023 variant */
+#define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13)
+
+/*
+ * SSP Status Register - SSP_SR
+ */
+#define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */
+#define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */
+#define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */
+#define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */
+#define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */
+
+/*
+ * SSP Clock Prescale Register - SSP_CPSR
+ */
+#define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0)
+
+/*
+ * SSP Interrupt Mask Set/Clear Register - SSP_IMSC
+ */
+#define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */
+#define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */
+#define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */
+#define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */
+
+/*
+ * SSP Raw Interrupt Status Register - SSP_RIS
+ */
+/* Receive Overrun Raw Interrupt status */
+#define SSP_RIS_MASK_RORRIS (0x1UL << 0)
+/* Receive Timeout Raw Interrupt status */
+#define SSP_RIS_MASK_RTRIS (0x1UL << 1)
+/* Receive FIFO Raw Interrupt status */
+#define SSP_RIS_MASK_RXRIS (0x1UL << 2)
+/* Transmit FIFO Raw Interrupt status */
+#define SSP_RIS_MASK_TXRIS (0x1UL << 3)
+
+/*
+ * SSP Masked Interrupt Status Register - SSP_MIS
+ */
+/* Receive Overrun Masked Interrupt status */
+#define SSP_MIS_MASK_RORMIS (0x1UL << 0)
+/* Receive Timeout Masked Interrupt status */
+#define SSP_MIS_MASK_RTMIS (0x1UL << 1)
+/* Receive FIFO Masked Interrupt status */
+#define SSP_MIS_MASK_RXMIS (0x1UL << 2)
+/* Transmit FIFO Masked Interrupt status */
+#define SSP_MIS_MASK_TXMIS (0x1UL << 3)
+
+/*
+ * SSP Interrupt Clear Register - SSP_ICR
+ */
+/* Receive Overrun Raw Clear Interrupt bit */
+#define SSP_ICR_MASK_RORIC (0x1UL << 0)
+/* Receive Timeout Clear Interrupt bit */
+#define SSP_ICR_MASK_RTIC (0x1UL << 1)
+
+/*
+ * SSP DMA Control Register - SSP_DMACR
+ */
+/* Receive DMA Enable bit */
+#define SSP_DMACR_MASK_RXDMAE (0x1UL << 0)
+/* Transmit DMA Enable bit */
+#define SSP_DMACR_MASK_TXDMAE (0x1UL << 1)
+
+/*
+ * SSP Chip Select Control Register - SSP_CSR
+ * (vendor extension)
+ */
+#define SSP_CSR_CSVALUE_MASK (0x1FUL << 0)
+
+/*
+ * SSP Integration Test control Register - SSP_ITCR
+ */
+#define SSP_ITCR_MASK_ITEN (0x1UL << 0)
+#define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1)
+
+/*
+ * SSP Integration Test Input Register - SSP_ITIP
+ */
+#define ITIP_MASK_SSPRXD (0x1UL << 0)
+#define ITIP_MASK_SSPFSSIN (0x1UL << 1)
+#define ITIP_MASK_SSPCLKIN (0x1UL << 2)
+#define ITIP_MASK_RXDMAC (0x1UL << 3)
+#define ITIP_MASK_TXDMAC (0x1UL << 4)
+#define ITIP_MASK_SSPTXDIN (0x1UL << 5)
+
+/*
+ * SSP Integration Test output Register - SSP_ITOP
+ */
+#define ITOP_MASK_SSPTXD (0x1UL << 0)
+#define ITOP_MASK_SSPFSSOUT (0x1UL << 1)
+#define ITOP_MASK_SSPCLKOUT (0x1UL << 2)
+#define ITOP_MASK_SSPOEn (0x1UL << 3)
+#define ITOP_MASK_SSPCTLOEn (0x1UL << 4)
+#define ITOP_MASK_RORINTR (0x1UL << 5)
+#define ITOP_MASK_RTINTR (0x1UL << 6)
+#define ITOP_MASK_RXINTR (0x1UL << 7)
+#define ITOP_MASK_TXINTR (0x1UL << 8)
+#define ITOP_MASK_INTR (0x1UL << 9)
+#define ITOP_MASK_RXDMABREQ (0x1UL << 10)
+#define ITOP_MASK_RXDMASREQ (0x1UL << 11)
+#define ITOP_MASK_TXDMABREQ (0x1UL << 12)
+#define ITOP_MASK_TXDMASREQ (0x1UL << 13)
+
+/*
+ * SSP Test Data Register - SSP_TDR
+ */
+#define TDR_MASK_TESTDATA (0xFFFFFFFF)
+
+/*
+ * Message State
+ * we use the spi_message.state (void *) pointer to
+ * hold a single state value, that's why all this
+ * (void *) casting is done here.
+ */
+#define STATE_START ((void *) 0)
+#define STATE_RUNNING ((void *) 1)
+#define STATE_DONE ((void *) 2)
+#define STATE_ERROR ((void *) -1)
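+
+/*
+ * For instance, msg->state = STATE_RUNNING stores the small integer 1
+ * as a pointer value; these states are only ever compared for equality
+ * and never dereferenced.
+ */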
+
+/*
+ * SSP State - Whether Enabled or Disabled
+ */
+#define SSP_DISABLED (0)
+#define SSP_ENABLED (1)
+
+/*
+ * SSP DMA State - Whether DMA Enabled or Disabled
+ */
+#define SSP_DMA_DISABLED (0)
+#define SSP_DMA_ENABLED (1)
+
+/*
+ * SSP Clock Defaults
+ */
+#define SSP_DEFAULT_CLKRATE 0x2
+#define SSP_DEFAULT_PRESCALE 0x40
+
+/*
+ * SSP Clock Parameter ranges
+ */
+#define CPSDVR_MIN 0x02
+#define CPSDVR_MAX 0xFE
+#define SCR_MIN 0x00
+#define SCR_MAX 0xFF
+
+/*
+ * SSP Interrupt related Macros
+ */
+#define DEFAULT_SSP_REG_IMSC 0x0UL
+#define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC
+#define ENABLE_ALL_INTERRUPTS ( \
+ SSP_IMSC_MASK_RORIM | \
+ SSP_IMSC_MASK_RTIM | \
+ SSP_IMSC_MASK_RXIM | \
+ SSP_IMSC_MASK_TXIM \
+)
+
+#define CLEAR_ALL_INTERRUPTS 0x3
+
+#define SPI_POLLING_TIMEOUT 1000
+
+/*
+ * The type of reading going on with this chip
+ */
+enum ssp_reading {
+ READING_NULL,
+ READING_U8,
+ READING_U16,
+ READING_U32
+};
+
+/*
+ * The type of writing going on with this chip
+ */
+enum ssp_writing {
+ WRITING_NULL,
+ WRITING_U8,
+ WRITING_U16,
+ WRITING_U32
+};
+
+/**
+ * struct vendor_data - vendor-specific config parameters
+ * for PL022 derivatives
+ * @fifodepth: depth of FIFOs (both)
+ * @max_bpw: maximum number of bits per word
+ * @unidir: supports unidirectional transfers
+ * @extended_cr: 32 bit wide control register 0 with extra
+ * features and extra features in CR1 as found in the ST variants
+ * @pl023: supports a subset of the ST extensions called "PL023"
+ * @loopback: supports loopback mode
+ * @internal_cs_ctrl: supports chip select control register
+ */
+struct vendor_data {
+ int fifodepth;
+ int max_bpw;
+ bool unidir;
+ bool extended_cr;
+ bool pl023;
+ bool loopback;
+ bool internal_cs_ctrl;
+};
+
+/**
+ * struct pl022 - This is the private SSP driver data structure
+ * @adev: AMBA device model hookup
+ * @vendor: vendor data for the IP block
+ * @phybase: the physical memory where the SSP device resides
+ * @virtbase: the virtual memory where the SSP is mapped
+ * @clk: outgoing clock "SPICLK" for the SPI bus
+ * @master: SPI framework hookup
+ * @master_info: controller-specific data from machine setup
+ * @pump_transfers: Tasklet used in Interrupt Transfer mode
+ * @cur_msg: Pointer to current spi_message being processed
+ * @cur_transfer: Pointer to current spi_transfer
+ * @cur_chip: pointer to the current client's chip (assigned from controller_state)
+ * @next_msg_cs_active: the next message in the queue has been examined
+ * and it was found that it uses the same chip select as the previous
+ * message, so we left it active after the previous transfer, and it's
+ * active already.
+ * @tx: current position in TX buffer to be read
+ * @tx_end: end position in TX buffer to be read
+ * @rx: current position in RX buffer to be written
+ * @rx_end: end position in RX buffer to be written
+ * @read: the type of read currently going on
+ * @write: the type of write currently going on
+ * @exp_fifo_level: expected FIFO level
+ * @rx_lev_trig: receive FIFO watermark level which triggers an IRQ or DMA
+ * @tx_lev_trig: transmit FIFO watermark level which triggers an IRQ or DMA
+ * @dma_rx_channel: optional channel for RX DMA
+ * @dma_tx_channel: optional channel for TX DMA
+ * @sgt_rx: scattertable for the RX transfer
+ * @sgt_tx: scattertable for the TX transfer
+ * @dummypage: a dummy page used for driving data on the bus with DMA
+ * @dma_running: a DMA transfer is currently in progress
+ * @cur_cs: current chip select (gpio)
+ * @chipselects: list of chipselects (gpios)
+ */
+struct pl022 {
+ struct amba_device *adev;
+ struct vendor_data *vendor;
+ resource_size_t phybase;
+ void __iomem *virtbase;
+ struct clk *clk;
+ struct spi_master *master;
+ struct pl022_ssp_controller *master_info;
+ /* Message per-transfer pump */
+ struct tasklet_struct pump_transfers;
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ bool next_msg_cs_active;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ enum ssp_reading read;
+ enum ssp_writing write;
+ u32 exp_fifo_level;
+ enum ssp_rx_level_trig rx_lev_trig;
+ enum ssp_tx_level_trig tx_lev_trig;
+ /* DMA settings */
+#ifdef CONFIG_DMA_ENGINE
+ struct dma_chan *dma_rx_channel;
+ struct dma_chan *dma_tx_channel;
+ struct sg_table sgt_rx;
+ struct sg_table sgt_tx;
+ char *dummypage;
+ bool dma_running;
+#endif
+ int cur_cs;
+ int *chipselects;
+};
+
+/**
+ * struct chip_data - To maintain runtime state of SSP for each client chip
+ * @cr0: Value of control register CR0 of SSP - on later ST variants this
+ * register is 32 bits wide rather than just 16
+ * @cr1: Value of control register CR1 of SSP
+ * @dmacr: Value of DMA control Register of SSP
+ * @cpsr: Value of Clock prescale register
+ * @n_bytes: how many bytes (a power of 2) are required for the client's data width
+ * @enable_dma: Whether to enable DMA or not
+ * @read: function ptr to be used to read when doing xfer for this chip
+ * @write: function ptr to be used to write when doing xfer for this chip
+ * @cs_control: chip select callback provided by chip
+ * @xfer_type: polling/interrupt/DMA
+ *
+ * Runtime state of the SSP controller, maintained per chip.
+ * It is set up according to the current message being served.
+ */
+struct chip_data {
+ u32 cr0;
+ u16 cr1;
+ u16 dmacr;
+ u16 cpsr;
+ u8 n_bytes;
+ bool enable_dma;
+ enum ssp_reading read;
+ enum ssp_writing write;
+ void (*cs_control) (u32 command);
+ int xfer_type;
+};
+
+/**
+ * null_cs_control - Dummy chip select function
+ * @command: select/deselect the chip
+ *
+ * If no chip select function is provided by the client, this is used
+ * as a dummy chip select
+ */
+static void null_cs_control(u32 command)
+{
+ pr_debug("pl022: dummy chip select control, CS=0x%x\n", command);
+}
+
+/**
+ * internal_cs_control - Control chip select signals via SSP_CSR.
+ * @pl022: SSP driver private data structure
+ * @command: select/deselect the chip
+ *
+ * Used on controllers with internal chip select control via the SSP_CSR
+ * register (vendor extension). Each of the 5 LSBs in the register controls
+ * one chip select signal.
+ */
+static void internal_cs_control(struct pl022 *pl022, u32 command)
+{
+ u32 tmp;
+
+ tmp = readw(SSP_CSR(pl022->virtbase));
+ if (command == SSP_CHIP_SELECT)
+ tmp &= ~BIT(pl022->cur_cs);
+ else
+ tmp |= BIT(pl022->cur_cs);
+ writew(tmp, SSP_CSR(pl022->virtbase));
+}
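+
+/*
+ * For example, with cur_cs == 2, SSP_CHIP_SELECT clears bit 2 of
+ * SSP_CSR to assert that chip select; SSP_CHIP_DESELECT sets the
+ * bit again.
+ */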
+
+static void pl022_cs_control(struct pl022 *pl022, u32 command)
+{
+ if (pl022->vendor->internal_cs_ctrl)
+ internal_cs_control(pl022, command);
+ else if (gpio_is_valid(pl022->cur_cs))
+ gpio_set_value(pl022->cur_cs, command);
+ else
+ pl022->cur_chip->cs_control(command);
+}
+
+/**
+ * giveback - the current spi_message is over; deselect the chip if needed
+ * and hand the message back to the SPI core. Assumes that the caller has
+ * already set message->status; DMA and PIO irqs are blocked
+ * @pl022: SSP driver private data structure
+ */
+static void giveback(struct pl022 *pl022)
+{
+ struct spi_transfer *last_transfer;
+ pl022->next_msg_cs_active = false;
+
+ last_transfer = list_last_entry(&pl022->cur_msg->transfers,
+ struct spi_transfer, transfer_list);
+
+ /* Delay if requested before any change in chip select */
+ if (last_transfer->delay_usecs)
+ /*
+ * FIXME: This runs in interrupt context.
+ * Is this really smart?
+ */
+ udelay(last_transfer->delay_usecs);
+
+ if (!last_transfer->cs_change) {
+ struct spi_message *next_msg;
+
+ /*
+ * cs_change was not set. We can keep the chip select
+		 * enabled if there is a message in the queue and it is
+ * for the same spi device.
+ *
+ * We cannot postpone this until pump_messages, because
+ * after calling msg->complete (below) the driver that
+ * sent the current message could be unloaded, which
+ * could invalidate the cs_control() callback...
+ */
+ /* get a pointer to the next message, if any */
+ next_msg = spi_get_next_queued_message(pl022->master);
+
+ /*
+ * see if the next and current messages point
+ * to the same spi device.
+ */
+ if (next_msg && next_msg->spi != pl022->cur_msg->spi)
+ next_msg = NULL;
+ if (!next_msg || pl022->cur_msg->state == STATE_ERROR)
+ pl022_cs_control(pl022, SSP_CHIP_DESELECT);
+ else
+ pl022->next_msg_cs_active = true;
+
+ }
+
+ pl022->cur_msg = NULL;
+ pl022->cur_transfer = NULL;
+ pl022->cur_chip = NULL;
+
+ /* disable the SPI/SSP operation */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+ spi_finalize_current_message(pl022->master);
+}
+
+/**
+ * flush - flush the FIFO to reach a clean state
+ * @pl022: SSP driver private data structure
+ */
+static int flush(struct pl022 *pl022)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ dev_dbg(&pl022->adev->dev, "flush\n");
+ do {
+ while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
+ readw(SSP_DR(pl022->virtbase));
+ } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--);
+
+ pl022->exp_fifo_level = 0;
+
+ return limit;
+}
+
+/**
+ * restore_state - Load configuration of current chip
+ * @pl022: SSP driver private data structure
+ */
+static void restore_state(struct pl022 *pl022)
+{
+ struct chip_data *chip = pl022->cur_chip;
+
+ if (pl022->vendor->extended_cr)
+ writel(chip->cr0, SSP_CR0(pl022->virtbase));
+ else
+ writew(chip->cr0, SSP_CR0(pl022->virtbase));
+ writew(chip->cr1, SSP_CR1(pl022->virtbase));
+ writew(chip->dmacr, SSP_DMACR(pl022->virtbase));
+ writew(chip->cpsr, SSP_CPSR(pl022->virtbase));
+ writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+}
+
+/*
+ * Default SSP Register Values
+ */
+#define DEFAULT_SSP_REG_CR0 ( \
+ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
+ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \
+ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
+ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
+ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
+)
+
+/* ST versions have slightly different bit layout */
+#define DEFAULT_SSP_REG_CR0_ST ( \
+ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
+ GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \
+ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
+ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
+ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
+ GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \
+ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \
+)
+
+/* The PL023 version is slightly different again */
+#define DEFAULT_SSP_REG_CR0_ST_PL023 ( \
+ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \
+ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
+ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
+ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \
+)
+
+#define DEFAULT_SSP_REG_CR1 ( \
+ GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \
+ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
+ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
+ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \
+)
+
+/* ST versions extend this register to use all 16 bits */
+#define DEFAULT_SSP_REG_CR1_ST ( \
+ DEFAULT_SSP_REG_CR1 | \
+ GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
+ GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
+ GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\
+ GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
+ GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \
+)
+
+/*
+ * The PL023 variant has further differences: no loopback mode, no microwire
+ * support, and a new clock feedback delay setting.
+ */
+#define DEFAULT_SSP_REG_CR1_ST_PL023 ( \
+ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \
+ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \
+ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \
+ GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \
+ GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \
+ GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \
+ GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \
+ GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \
+)
+
+#define DEFAULT_SSP_REG_CPSR ( \
+ GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \
+)
+
+#define DEFAULT_SSP_REG_DMACR (\
+ GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \
+ GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \
+)
+
+/**
+ * load_ssp_default_config - Load default configuration for SSP
+ * @pl022: SSP driver private data structure
+ */
+static void load_ssp_default_config(struct pl022 *pl022)
+{
+ if (pl022->vendor->pl023) {
+ writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase));
+ } else if (pl022->vendor->extended_cr) {
+ writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase));
+ } else {
+ writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase));
+ }
+ writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase));
+ writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase));
+ writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+}
+
+/**
+ * readwriter - write to the TX FIFO and read from the RX FIFO
+ * @pl022: SSP driver private data structure
+ *
+ * This will write to TX and read from RX according to the parameters
+ * set in pl022.
+ */
+static void readwriter(struct pl022 *pl022)
+{
+
+ /*
+ * The FIFO depth is different between primecell variants.
+	 * I believe filling in too much in the FIFO might cause
+	 * errors in 8-bit wide transfers on ARM variants (just an 8-word
+	 * FIFO, meaning only 8x8 = 64 bits in the FIFO), at least.
+ *
+ * To prevent this issue, the TX FIFO is only filled to the
+ * unused RX FIFO fill length, regardless of what the TX
+ * FIFO status flag indicates.
+ */
+ dev_dbg(&pl022->adev->dev,
+ "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n",
+ __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end);
+
+ /* Read as much as you can */
+ while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
+ && (pl022->rx < pl022->rx_end)) {
+ switch (pl022->read) {
+ case READING_NULL:
+ readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U8:
+ *(u8 *) (pl022->rx) =
+ readw(SSP_DR(pl022->virtbase)) & 0xFFU;
+ break;
+ case READING_U16:
+ *(u16 *) (pl022->rx) =
+ (u16) readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U32:
+ *(u32 *) (pl022->rx) =
+ readl(SSP_DR(pl022->virtbase));
+ break;
+ }
+ pl022->rx += (pl022->cur_chip->n_bytes);
+ pl022->exp_fifo_level--;
+ }
+ /*
+ * Write as much as possible up to the RX FIFO size
+ */
+ while ((pl022->exp_fifo_level < pl022->vendor->fifodepth)
+ && (pl022->tx < pl022->tx_end)) {
+ switch (pl022->write) {
+ case WRITING_NULL:
+ writew(0x0, SSP_DR(pl022->virtbase));
+ break;
+ case WRITING_U8:
+ writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase));
+ break;
+ case WRITING_U16:
+ writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase));
+ break;
+ case WRITING_U32:
+ writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase));
+ break;
+ }
+ pl022->tx += (pl022->cur_chip->n_bytes);
+ pl022->exp_fifo_level++;
+ /*
+ * This inner reader takes care of things appearing in the RX
+ * FIFO as we're transmitting. This will happen a lot since the
+ * clock starts running when you put things into the TX FIFO,
+ * and then things are continuously clocked into the RX FIFO.
+ */
+ while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE)
+ && (pl022->rx < pl022->rx_end)) {
+ switch (pl022->read) {
+ case READING_NULL:
+ readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U8:
+ *(u8 *) (pl022->rx) =
+ readw(SSP_DR(pl022->virtbase)) & 0xFFU;
+ break;
+ case READING_U16:
+ *(u16 *) (pl022->rx) =
+ (u16) readw(SSP_DR(pl022->virtbase));
+ break;
+ case READING_U32:
+ *(u32 *) (pl022->rx) =
+ readl(SSP_DR(pl022->virtbase));
+ break;
+ }
+ pl022->rx += (pl022->cur_chip->n_bytes);
+ pl022->exp_fifo_level--;
+ }
+ }
+ /*
+ * When we exit here the TX FIFO should be full and the RX FIFO
+ * should be empty
+ */
+}
+
+/**
+ * next_transfer - Move to the Next transfer in the current spi message
+ * @pl022: SSP driver private data structure
+ *
+ * This function moves through the linked list of spi transfers in the
+ * current spi message and returns with the state of the current spi
+ * message, i.e. whether its last transfer is done (STATE_DONE) or the
+ * next transfer is ready (STATE_RUNNING)
+ */
+static void *next_transfer(struct pl022 *pl022)
+{
+ struct spi_message *msg = pl022->cur_msg;
+ struct spi_transfer *trans = pl022->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ pl022->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+ return STATE_RUNNING;
+ }
+ return STATE_DONE;
+}
+
+/*
+ * This DMA functionality is only compiled in if we have
+ * access to the generic DMA devices/DMA engine.
+ */
+#ifdef CONFIG_DMA_ENGINE
+static void unmap_free_dma_scatter(struct pl022 *pl022)
+{
+ /* Unmap and free the SG tables */
+ dma_unmap_sg(pl022->dma_tx_channel->device->dev, pl022->sgt_tx.sgl,
+ pl022->sgt_tx.nents, DMA_TO_DEVICE);
+ dma_unmap_sg(pl022->dma_rx_channel->device->dev, pl022->sgt_rx.sgl,
+ pl022->sgt_rx.nents, DMA_FROM_DEVICE);
+ sg_free_table(&pl022->sgt_rx);
+ sg_free_table(&pl022->sgt_tx);
+}
+
+static void dma_callback(void *data)
+{
+ struct pl022 *pl022 = data;
+ struct spi_message *msg = pl022->cur_msg;
+
+ BUG_ON(!pl022->sgt_rx.sgl);
+
+#ifdef VERBOSE_DEBUG
+ /*
+ * Optionally dump out buffers to inspect contents, this is
+ * good if you want to convince yourself that the loopback
+	 * read/write contents are the same, when adapting to a new
+ * DMA engine.
+ */
+ {
+ struct scatterlist *sg;
+ unsigned int i;
+
+ dma_sync_sg_for_cpu(&pl022->adev->dev,
+ pl022->sgt_rx.sgl,
+ pl022->sgt_rx.nents,
+ DMA_FROM_DEVICE);
+
+ for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) {
+ dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i);
+ print_hex_dump(KERN_ERR, "SPI RX: ",
+ DUMP_PREFIX_OFFSET,
+ 16,
+ 1,
+ sg_virt(sg),
+ sg_dma_len(sg),
+ 1);
+ }
+ for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) {
+ dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i);
+ print_hex_dump(KERN_ERR, "SPI TX: ",
+ DUMP_PREFIX_OFFSET,
+ 16,
+ 1,
+ sg_virt(sg),
+ sg_dma_len(sg),
+ 1);
+ }
+ }
+#endif
+
+ unmap_free_dma_scatter(pl022);
+
+ /* Update total bytes transferred */
+ msg->actual_length += pl022->cur_transfer->len;
+ if (pl022->cur_transfer->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_DESELECT);
+
+ /* Move to next transfer */
+ msg->state = next_transfer(pl022);
+ tasklet_schedule(&pl022->pump_transfers);
+}
+
+static void setup_dma_scatter(struct pl022 *pl022,
+ void *buffer,
+ unsigned int length,
+ struct sg_table *sgtab)
+{
+ struct scatterlist *sg;
+ int bytesleft = length;
+ void *bufp = buffer;
+ int mapbytes;
+ int i;
+
+ if (buffer) {
+ for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
+ /*
+			 * If fewer bytes are left than fit in the current
+			 * page (taking the page alignment offset into
+			 * account), just feed in those; else stuff in as
+			 * much as the page can hold.
+ */
+ if (bytesleft < (PAGE_SIZE - offset_in_page(bufp)))
+ mapbytes = bytesleft;
+ else
+ mapbytes = PAGE_SIZE - offset_in_page(bufp);
+ sg_set_page(sg, virt_to_page(bufp),
+ mapbytes, offset_in_page(bufp));
+ bufp += mapbytes;
+ bytesleft -= mapbytes;
+ dev_dbg(&pl022->adev->dev,
+ "set RX/TX target page @ %p, %d bytes, %d left\n",
+ bufp, mapbytes, bytesleft);
+ }
+ } else {
+ /* Map the dummy buffer on every page */
+ for_each_sg(sgtab->sgl, sg, sgtab->nents, i) {
+ if (bytesleft < PAGE_SIZE)
+ mapbytes = bytesleft;
+ else
+ mapbytes = PAGE_SIZE;
+ sg_set_page(sg, virt_to_page(pl022->dummypage),
+ mapbytes, 0);
+ bytesleft -= mapbytes;
+ dev_dbg(&pl022->adev->dev,
+ "set RX/TX to dummy page %d bytes, %d left\n",
+ mapbytes, bytesleft);
+
+ }
+ }
+ BUG_ON(bytesleft);
+}
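+
+/*
+ * A worked example of the page chunking above (hypothetical numbers):
+ * a 10240 byte buffer starting 256 bytes into a 4096 byte page is
+ * split as 3840 + 4096 + 2304 bytes across three scatterlist entries.
+ */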
+
+/**
+ * configure_dma - configures the channels for the next transfer
+ * @pl022: SSP driver's private data structure
+ */
+static int configure_dma(struct pl022 *pl022)
+{
+ struct dma_slave_config rx_conf = {
+ .src_addr = SSP_DR(pl022->phybase),
+ .direction = DMA_DEV_TO_MEM,
+ .device_fc = false,
+ };
+ struct dma_slave_config tx_conf = {
+ .dst_addr = SSP_DR(pl022->phybase),
+ .direction = DMA_MEM_TO_DEV,
+ .device_fc = false,
+ };
+ unsigned int pages;
+ int ret;
+ int rx_sglen, tx_sglen;
+ struct dma_chan *rxchan = pl022->dma_rx_channel;
+ struct dma_chan *txchan = pl022->dma_tx_channel;
+ struct dma_async_tx_descriptor *rxdesc;
+ struct dma_async_tx_descriptor *txdesc;
+
+ /* Check that the channels are available */
+ if (!rxchan || !txchan)
+ return -ENODEV;
+
+ /*
+	 * If supplied, the DMA burst size should equal the FIFO trigger
+	 * level. Notice that the DMA engine uses one-to-one mapping. Since
+	 * we cannot trigger on 2 elements this needs explicit mapping
+	 * rather than calculation.
+ */
+ switch (pl022->rx_lev_trig) {
+ case SSP_RX_1_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 1;
+ break;
+ case SSP_RX_4_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 4;
+ break;
+ case SSP_RX_8_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 8;
+ break;
+ case SSP_RX_16_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 16;
+ break;
+ case SSP_RX_32_OR_MORE_ELEM:
+ rx_conf.src_maxburst = 32;
+ break;
+ default:
+ rx_conf.src_maxburst = pl022->vendor->fifodepth >> 1;
+ break;
+ }
+
+ switch (pl022->tx_lev_trig) {
+ case SSP_TX_1_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 1;
+ break;
+ case SSP_TX_4_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 4;
+ break;
+ case SSP_TX_8_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 8;
+ break;
+ case SSP_TX_16_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 16;
+ break;
+ case SSP_TX_32_OR_MORE_EMPTY_LOC:
+ tx_conf.dst_maxburst = 32;
+ break;
+ default:
+ tx_conf.dst_maxburst = pl022->vendor->fifodepth >> 1;
+ break;
+ }
+
+ switch (pl022->read) {
+ case READING_NULL:
+ /* Use the same as for writing */
+ rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ break;
+ case READING_U8:
+ rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case READING_U16:
+ rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case READING_U32:
+ rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ }
+
+ switch (pl022->write) {
+ case WRITING_NULL:
+ /* Use the same as for reading */
+ tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
+ break;
+ case WRITING_U8:
+ tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case WRITING_U16:
+ tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ case WRITING_U32:
+ tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ }
+
+	/* SPI peculiarity: we need to read and write the same width */
+ if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ rx_conf.src_addr_width = tx_conf.dst_addr_width;
+ if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED)
+ tx_conf.dst_addr_width = rx_conf.src_addr_width;
+ BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width);
+
+ dmaengine_slave_config(rxchan, &rx_conf);
+ dmaengine_slave_config(txchan, &tx_conf);
+
+ /* Create sglists for the transfers */
+ pages = DIV_ROUND_UP(pl022->cur_transfer->len, PAGE_SIZE);
+ dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages);
+
+ ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_ATOMIC);
+ if (ret)
+ goto err_alloc_rx_sg;
+
+ ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_ATOMIC);
+ if (ret)
+ goto err_alloc_tx_sg;
+
+ /* Fill in the scatterlists for the RX+TX buffers */
+ setup_dma_scatter(pl022, pl022->rx,
+ pl022->cur_transfer->len, &pl022->sgt_rx);
+ setup_dma_scatter(pl022, pl022->tx,
+ pl022->cur_transfer->len, &pl022->sgt_tx);
+
+ /* Map DMA buffers */
+ rx_sglen = dma_map_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
+ pl022->sgt_rx.nents, DMA_FROM_DEVICE);
+ if (!rx_sglen)
+ goto err_rx_sgmap;
+
+ tx_sglen = dma_map_sg(txchan->device->dev, pl022->sgt_tx.sgl,
+ pl022->sgt_tx.nents, DMA_TO_DEVICE);
+ if (!tx_sglen)
+ goto err_tx_sgmap;
+
+ /* Send both scatterlists */
+ rxdesc = dmaengine_prep_slave_sg(rxchan,
+ pl022->sgt_rx.sgl,
+ rx_sglen,
+ DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!rxdesc)
+ goto err_rxdesc;
+
+ txdesc = dmaengine_prep_slave_sg(txchan,
+ pl022->sgt_tx.sgl,
+ tx_sglen,
+ DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!txdesc)
+ goto err_txdesc;
+
+ /* Put the callback on the RX transfer only, that should finish last */
+ rxdesc->callback = dma_callback;
+ rxdesc->callback_param = pl022;
+
+ /* Submit and fire RX and TX with TX last so we're ready to read! */
+ dmaengine_submit(rxdesc);
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(rxchan);
+ dma_async_issue_pending(txchan);
+ pl022->dma_running = true;
+
+ return 0;
+
+err_txdesc:
+ dmaengine_terminate_all(txchan);
+err_rxdesc:
+ dmaengine_terminate_all(rxchan);
+ dma_unmap_sg(txchan->device->dev, pl022->sgt_tx.sgl,
+ pl022->sgt_tx.nents, DMA_TO_DEVICE);
+err_tx_sgmap:
+ dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
+ pl022->sgt_rx.nents, DMA_FROM_DEVICE);
+err_rx_sgmap:
+ sg_free_table(&pl022->sgt_tx);
+err_alloc_tx_sg:
+ sg_free_table(&pl022->sgt_rx);
+err_alloc_rx_sg:
+ return -ENOMEM;
+}
+
+static int pl022_dma_probe(struct pl022 *pl022)
+{
+ dma_cap_mask_t mask;
+
+ /* Try to acquire a generic DMA engine slave channel */
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+ /*
+ * We need both RX and TX channels to do DMA, else do none
+ * of them.
+ */
+ pl022->dma_rx_channel = dma_request_channel(mask,
+ pl022->master_info->dma_filter,
+ pl022->master_info->dma_rx_param);
+ if (!pl022->dma_rx_channel) {
+ dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n");
+ goto err_no_rxchan;
+ }
+
+ pl022->dma_tx_channel = dma_request_channel(mask,
+ pl022->master_info->dma_filter,
+ pl022->master_info->dma_tx_param);
+ if (!pl022->dma_tx_channel) {
+ dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n");
+ goto err_no_txchan;
+ }
+
+ pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pl022->dummypage)
+ goto err_no_dummypage;
+
+ dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n",
+ dma_chan_name(pl022->dma_rx_channel),
+ dma_chan_name(pl022->dma_tx_channel));
+
+ return 0;
+
+err_no_dummypage:
+ dma_release_channel(pl022->dma_tx_channel);
+err_no_txchan:
+ dma_release_channel(pl022->dma_rx_channel);
+ pl022->dma_rx_channel = NULL;
+err_no_rxchan:
+ dev_err(&pl022->adev->dev,
+ "Failed to work in dma mode, work without dma!\n");
+ return -ENODEV;
+}
+
+static int pl022_dma_autoprobe(struct pl022 *pl022)
+{
+ struct device *dev = &pl022->adev->dev;
+
+ /* automatically configure DMA channels from platform, normally using DT */
+ pl022->dma_rx_channel = dma_request_slave_channel(dev, "rx");
+ if (!pl022->dma_rx_channel)
+ goto err_no_rxchan;
+
+ pl022->dma_tx_channel = dma_request_slave_channel(dev, "tx");
+ if (!pl022->dma_tx_channel)
+ goto err_no_txchan;
+
+ pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!pl022->dummypage)
+ goto err_no_dummypage;
+
+ return 0;
+
+err_no_dummypage:
+ dma_release_channel(pl022->dma_tx_channel);
+ pl022->dma_tx_channel = NULL;
+err_no_txchan:
+ dma_release_channel(pl022->dma_rx_channel);
+ pl022->dma_rx_channel = NULL;
+err_no_rxchan:
+ return -ENODEV;
+}
+
+static void terminate_dma(struct pl022 *pl022)
+{
+ struct dma_chan *rxchan = pl022->dma_rx_channel;
+ struct dma_chan *txchan = pl022->dma_tx_channel;
+
+ dmaengine_terminate_all(rxchan);
+ dmaengine_terminate_all(txchan);
+ unmap_free_dma_scatter(pl022);
+ pl022->dma_running = false;
+}
+
+static void pl022_dma_remove(struct pl022 *pl022)
+{
+ if (pl022->dma_running)
+ terminate_dma(pl022);
+ if (pl022->dma_tx_channel)
+ dma_release_channel(pl022->dma_tx_channel);
+ if (pl022->dma_rx_channel)
+ dma_release_channel(pl022->dma_rx_channel);
+ kfree(pl022->dummypage);
+}
+
+#else
+static inline int configure_dma(struct pl022 *pl022)
+{
+ return -ENODEV;
+}
+
+static inline int pl022_dma_autoprobe(struct pl022 *pl022)
+{
+ return 0;
+}
+
+static inline int pl022_dma_probe(struct pl022 *pl022)
+{
+ return 0;
+}
+
+static inline void pl022_dma_remove(struct pl022 *pl022)
+{
+}
+#endif
+
+/**
+ * pl022_interrupt_handler - Interrupt handler for SSP controller
+ * @irq: IRQ number
+ * @dev_id: the SSP driver private data structure
+ *
+ * This function handles interrupts generated for an interrupt based transfer.
+ * If a receive overrun (ROR) interrupt is pending then we disable SSP, flag
+ * the current message's state as STATE_ERROR and schedule the tasklet
+ * pump_transfers, which will do the postprocessing of the current message by
+ * calling giveback(). Otherwise it reads data from the RX FIFO until there is
+ * no more data, and writes data into the TX FIFO until it is full. When we
+ * complete a transfer we move to the next transfer and schedule the tasklet.
+ */
+static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id)
+{
+ struct pl022 *pl022 = dev_id;
+ struct spi_message *msg = pl022->cur_msg;
+ u16 irq_status = 0;
+
+ if (unlikely(!msg)) {
+ dev_err(&pl022->adev->dev,
+ "bad message state in interrupt handler");
+ /* Never fail */
+ return IRQ_HANDLED;
+ }
+
+ /* Read the Interrupt Status Register */
+ irq_status = readw(SSP_MIS(pl022->virtbase));
+
+ if (unlikely(!irq_status))
+ return IRQ_NONE;
+
+ /*
+	 * This handles the FIFO interrupts; the timeout
+	 * interrupts are flatly ignored because they cannot
+	 * be trusted.
+ */
+ if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) {
+ /*
+		 * Overrun interrupt - bail out since our data has been
+ * corrupted
+ */
+ dev_err(&pl022->adev->dev, "FIFO overrun\n");
+ if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF)
+ dev_err(&pl022->adev->dev,
+ "RXFIFO is full\n");
+
+ /*
+ * Disable and clear interrupts, disable SSP,
+ * mark message with bad status so it can be
+ * retried.
+ */
+ writew(DISABLE_ALL_INTERRUPTS,
+ SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+ msg->state = STATE_ERROR;
+
+ /* Schedule message queue handler */
+ tasklet_schedule(&pl022->pump_transfers);
+ return IRQ_HANDLED;
+ }
+
+ readwriter(pl022);
+
+ if (pl022->tx == pl022->tx_end) {
+ /* Disable Transmit interrupt, enable receive interrupt */
+ writew((readw(SSP_IMSC(pl022->virtbase)) &
+ ~SSP_IMSC_MASK_TXIM) | SSP_IMSC_MASK_RXIM,
+ SSP_IMSC(pl022->virtbase));
+ }
+
+ /*
+ * Since all transactions must write as much as shall be read,
+ * we can conclude the entire transaction once RX is complete.
+ * At this point, all TX will always be finished.
+ */
+ if (pl022->rx >= pl022->rx_end) {
+ writew(DISABLE_ALL_INTERRUPTS,
+ SSP_IMSC(pl022->virtbase));
+ writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase));
+ if (unlikely(pl022->rx > pl022->rx_end)) {
+ dev_warn(&pl022->adev->dev, "read %u surplus "
+ "bytes (did you request an odd "
+ "number of bytes on a 16bit bus?)\n",
+ (u32) (pl022->rx - pl022->rx_end));
+ }
+ /* Update total bytes transferred */
+ msg->actual_length += pl022->cur_transfer->len;
+ if (pl022->cur_transfer->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_DESELECT);
+ /* Move to next transfer */
+ msg->state = next_transfer(pl022);
+ tasklet_schedule(&pl022->pump_transfers);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_HANDLED;
+}
+
+/**
+ * set_up_next_transfer - set up the buffer pointers for the next transfer
+ * @pl022: SSP driver private data structure
+ * @transfer: the spi transfer whose buffers should be set up
+ *
+ * This sets up the pointers to memory for the next transfer to
+ * send out on the SPI bus.
+ */
+static int set_up_next_transfer(struct pl022 *pl022,
+ struct spi_transfer *transfer)
+{
+ int residue;
+
+ /* Sanity check the message for this bus width */
+ residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes;
+ if (unlikely(residue != 0)) {
+ dev_err(&pl022->adev->dev,
+ "message of %u bytes to transmit but the current "
+ "chip bus has a data width of %u bytes!\n",
+ pl022->cur_transfer->len,
+ pl022->cur_chip->n_bytes);
+ dev_err(&pl022->adev->dev, "skipping this message\n");
+ return -EIO;
+ }
+ pl022->tx = (void *)transfer->tx_buf;
+ pl022->tx_end = pl022->tx + pl022->cur_transfer->len;
+ pl022->rx = (void *)transfer->rx_buf;
+ pl022->rx_end = pl022->rx + pl022->cur_transfer->len;
+ pl022->write =
+ pl022->tx ? pl022->cur_chip->write : WRITING_NULL;
+ pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL;
+ return 0;
+}
+
+/**
+ * pump_transfers - Tasklet function which schedules next transfer
+ * when running in interrupt or DMA transfer mode.
+ * @data: SSP driver private data structure
+ */
+static void pump_transfers(unsigned long data)
+{
+ struct pl022 *pl022 = (struct pl022 *) data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+
+ /* Get current state information */
+ message = pl022->cur_msg;
+ transfer = pl022->cur_transfer;
+
+	/* Handle an abort */
+ if (message->state == STATE_ERROR) {
+ message->status = -EIO;
+ giveback(pl022);
+ return;
+ }
+
+ /* Handle end of message */
+ if (message->state == STATE_DONE) {
+ message->status = 0;
+ giveback(pl022);
+ return;
+ }
+
+ /* Delay if requested at end of transfer before CS change */
+ if (message->state == STATE_RUNNING) {
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer,
+ transfer_list);
+ if (previous->delay_usecs)
+ /*
+ * FIXME: This runs in interrupt context.
+ * Is this really smart?
+ */
+ udelay(previous->delay_usecs);
+
+ /* Reselect chip select only if cs_change was requested */
+ if (previous->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_SELECT);
+ } else {
+ /* STATE_START */
+ message->state = STATE_RUNNING;
+ }
+
+ if (set_up_next_transfer(pl022, transfer)) {
+ message->state = STATE_ERROR;
+ message->status = -EIO;
+ giveback(pl022);
+ return;
+ }
+ /* Flush the FIFOs and let's go! */
+ flush(pl022);
+
+ if (pl022->cur_chip->enable_dma) {
+ if (configure_dma(pl022)) {
+ dev_dbg(&pl022->adev->dev,
+ "configuration of DMA failed, fall back to interrupt mode\n");
+ goto err_config_dma;
+ }
+ return;
+ }
+
+err_config_dma:
+ /* enable all interrupts except RX */
+ writew(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM, SSP_IMSC(pl022->virtbase));
+}
+
+static void do_interrupt_dma_transfer(struct pl022 *pl022)
+{
+ /*
+ * Default is to enable all interrupts except RX -
+	 * the RX interrupt will be enabled once TX is complete
+ */
+ u32 irqflags = (u32)(ENABLE_ALL_INTERRUPTS & ~SSP_IMSC_MASK_RXIM);
+
+ /* Enable target chip, if not already active */
+ if (!pl022->next_msg_cs_active)
+ pl022_cs_control(pl022, SSP_CHIP_SELECT);
+
+ if (set_up_next_transfer(pl022, pl022->cur_transfer)) {
+ /* Error path */
+ pl022->cur_msg->state = STATE_ERROR;
+ pl022->cur_msg->status = -EIO;
+ giveback(pl022);
+ return;
+ }
+ /* If we're using DMA, set up DMA here */
+ if (pl022->cur_chip->enable_dma) {
+ /* Configure DMA transfer */
+ if (configure_dma(pl022)) {
+ dev_dbg(&pl022->adev->dev,
+ "configuration of DMA failed, fall back to interrupt mode\n");
+ goto err_config_dma;
+ }
+ /* Disable interrupts in DMA mode, IRQ from DMA controller */
+ irqflags = DISABLE_ALL_INTERRUPTS;
+ }
+err_config_dma:
+ /* Enable SSP, turn on interrupts */
+ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
+ SSP_CR1(pl022->virtbase));
+ writew(irqflags, SSP_IMSC(pl022->virtbase));
+}
+
+static void do_polling_transfer(struct pl022 *pl022)
+{
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct chip_data *chip;
+ unsigned long time, timeout;
+
+ chip = pl022->cur_chip;
+ message = pl022->cur_msg;
+
+ while (message->state != STATE_DONE) {
+		/* Handle an abort */
+ if (message->state == STATE_ERROR)
+ break;
+ transfer = pl022->cur_transfer;
+
+ /* Delay if requested at end of transfer */
+ if (message->state == STATE_RUNNING) {
+ previous =
+ list_entry(transfer->transfer_list.prev,
+ struct spi_transfer, transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+ if (previous->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_SELECT);
+ } else {
+ /* STATE_START */
+ message->state = STATE_RUNNING;
+ if (!pl022->next_msg_cs_active)
+ pl022_cs_control(pl022, SSP_CHIP_SELECT);
+ }
+
+		/* Configuration changes per transfer */
+ if (set_up_next_transfer(pl022, transfer)) {
+ /* Error path */
+ message->state = STATE_ERROR;
+ break;
+ }
+ /* Flush FIFOs and enable SSP */
+ flush(pl022);
+ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE),
+ SSP_CR1(pl022->virtbase));
+
+ dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n");
+
+ timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT);
+ while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) {
+ time = jiffies;
+ readwriter(pl022);
+ if (time_after(time, timeout)) {
+ dev_warn(&pl022->adev->dev,
+ "%s: timeout!\n", __func__);
+ message->state = STATE_ERROR;
+ goto out;
+ }
+ cpu_relax();
+ }
+
+		/* Update total bytes transferred */
+ message->actual_length += pl022->cur_transfer->len;
+ if (pl022->cur_transfer->cs_change)
+ pl022_cs_control(pl022, SSP_CHIP_DESELECT);
+ /* Move to next transfer */
+ message->state = next_transfer(pl022);
+ }
+out:
+ /* Handle end of message */
+ if (message->state == STATE_DONE)
+ message->status = 0;
+ else
+ message->status = -EIO;
+
+ giveback(pl022);
+ return;
+}
+
+static int pl022_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct pl022 *pl022 = spi_master_get_devdata(master);
+
+ /* Initial message state */
+ pl022->cur_msg = msg;
+ msg->state = STATE_START;
+
+ pl022->cur_transfer = list_entry(msg->transfers.next,
+ struct spi_transfer, transfer_list);
+
+ /* Setup the SPI using the per chip configuration */
+ pl022->cur_chip = spi_get_ctldata(msg->spi);
+ pl022->cur_cs = pl022->chipselects[msg->spi->chip_select];
+
+ restore_state(pl022);
+ flush(pl022);
+
+ if (pl022->cur_chip->xfer_type == POLLING_TRANSFER)
+ do_polling_transfer(pl022);
+ else
+ do_interrupt_dma_transfer(pl022);
+
+ return 0;
+}
+
+static int pl022_unprepare_transfer_hardware(struct spi_master *master)
+{
+ struct pl022 *pl022 = spi_master_get_devdata(master);
+
+ /* nothing more to do - disable spi/ssp and power off */
+ writew((readw(SSP_CR1(pl022->virtbase)) &
+ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
+
+ return 0;
+}
+
+static int verify_controller_parameters(struct pl022 *pl022,
+ struct pl022_config_chip const *chip_info)
+{
+ if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI)
+ || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) {
+ dev_err(&pl022->adev->dev,
+ "interface is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) &&
+ (!pl022->vendor->unidir)) {
+ dev_err(&pl022->adev->dev,
+ "unidirectional mode not supported in this "
+ "hardware version\n");
+ return -EINVAL;
+ }
+ if ((chip_info->hierarchy != SSP_MASTER)
+ && (chip_info->hierarchy != SSP_SLAVE)) {
+ dev_err(&pl022->adev->dev,
+ "hierarchy is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->com_mode != INTERRUPT_TRANSFER)
+ && (chip_info->com_mode != DMA_TRANSFER)
+ && (chip_info->com_mode != POLLING_TRANSFER)) {
+ dev_err(&pl022->adev->dev,
+ "Communication mode is configured incorrectly\n");
+ return -EINVAL;
+ }
+ switch (chip_info->rx_lev_trig) {
+ case SSP_RX_1_OR_MORE_ELEM:
+ case SSP_RX_4_OR_MORE_ELEM:
+ case SSP_RX_8_OR_MORE_ELEM:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SSP_RX_16_OR_MORE_ELEM:
+ if (pl022->vendor->fifodepth < 16) {
+ dev_err(&pl022->adev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SSP_RX_32_OR_MORE_ELEM:
+ if (pl022->vendor->fifodepth < 32) {
+ dev_err(&pl022->adev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(&pl022->adev->dev,
+ "RX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ switch (chip_info->tx_lev_trig) {
+ case SSP_TX_1_OR_MORE_EMPTY_LOC:
+ case SSP_TX_4_OR_MORE_EMPTY_LOC:
+ case SSP_TX_8_OR_MORE_EMPTY_LOC:
+ /* These are always OK, all variants can handle this */
+ break;
+ case SSP_TX_16_OR_MORE_EMPTY_LOC:
+ if (pl022->vendor->fifodepth < 16) {
+ dev_err(&pl022->adev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ case SSP_TX_32_OR_MORE_EMPTY_LOC:
+ if (pl022->vendor->fifodepth < 32) {
+ dev_err(&pl022->adev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ break;
+ default:
+ dev_err(&pl022->adev->dev,
+ "TX FIFO Trigger Level is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) {
+ if ((chip_info->ctrl_len < SSP_BITS_4)
+ || (chip_info->ctrl_len > SSP_BITS_32)) {
+ dev_err(&pl022->adev->dev,
+ "CTRL LEN is configured incorrectly\n");
+ return -EINVAL;
+ }
+ if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO)
+ && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) {
+ dev_err(&pl022->adev->dev,
+ "Wait State is configured incorrectly\n");
+ return -EINVAL;
+ }
+ /* Half duplex is only available in the ST Micro version */
+ if (pl022->vendor->extended_cr) {
+ if ((chip_info->duplex !=
+ SSP_MICROWIRE_CHANNEL_FULL_DUPLEX)
+ && (chip_info->duplex !=
+ SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) {
+ dev_err(&pl022->adev->dev,
+ "Microwire duplex mode is configured incorrectly\n");
+ return -EINVAL;
+ }
+ } else {
+			if (chip_info->duplex !=
+			    SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) {
+				dev_err(&pl022->adev->dev,
+					"Microwire half duplex mode requested,"
+					" but this is only available in the"
+					" ST version of PL022\n");
+				return -EINVAL;
+			}
+		}
+ }
+ return 0;
+}
+
+static inline u32 spi_rate(u32 rate, u16 cpsdvsr, u16 scr)
+{
+ return rate / (cpsdvsr * (1 + scr));
+}
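+
+/*
+ * A worked example (hypothetical clock): with a 48 MHz SSP clock,
+ * cpsdvsr = 2 and scr = 0 give 48000000 / (2 * (1 + 0)) = 24 MHz,
+ * the fastest possible rate; cpsdvsr = 254 and scr = 255 give the
+ * slowest, 48000000 / (254 * 256) = ~738 Hz.
+ */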
+
+static int calculate_effective_freq(struct pl022 *pl022, int freq,
+				    struct ssp_clock_params *clk_freq)
+{
+	/* Let's calculate the frequency parameters */
+ u16 cpsdvsr = CPSDVR_MIN, scr = SCR_MIN;
+ u32 rate, max_tclk, min_tclk, best_freq = 0, best_cpsdvsr = 0,
+ best_scr = 0, tmp, found = 0;
+
+ rate = clk_get_rate(pl022->clk);
+	/* cpsdvsr = 2 & scr = 0 */
+ max_tclk = spi_rate(rate, CPSDVR_MIN, SCR_MIN);
+ /* cpsdvsr = 254 & scr = 255 */
+ min_tclk = spi_rate(rate, CPSDVR_MAX, SCR_MAX);
+
+ if (freq > max_tclk)
+ dev_warn(&pl022->adev->dev,
+ "Max speed that can be programmed is %d Hz, you requested %d\n",
+ max_tclk, freq);
+
+ if (freq < min_tclk) {
+ dev_err(&pl022->adev->dev,
+ "Requested frequency: %d Hz is less than minimum possible %d Hz\n",
+ freq, min_tclk);
+ return -EINVAL;
+ }
+
+ /*
+ * best_freq will give closest possible available rate (<= requested
+ * freq) for all values of scr & cpsdvsr.
+ */
+ while ((cpsdvsr <= CPSDVR_MAX) && !found) {
+ while (scr <= SCR_MAX) {
+ tmp = spi_rate(rate, cpsdvsr, scr);
+
+ if (tmp > freq) {
+ /* we need lower freq */
+ scr++;
+ continue;
+ }
+
+ /*
+			 * If we find the exact value, mark it found and break.
+			 * If we find a closer value, remember it and break.
+ */
+ if (tmp > best_freq) {
+ best_freq = tmp;
+ best_cpsdvsr = cpsdvsr;
+ best_scr = scr;
+
+ if (tmp == freq)
+ found = 1;
+ }
+ /*
+			 * an increased scr will give lower rates, which are
+			 * not wanted
+ */
+ break;
+ }
+ cpsdvsr += 2;
+ scr = SCR_MIN;
+ }
+
+	WARN(!best_freq, "pl022: Matching cpsdvsr and scr not found for %d Hz rate\n",
+ freq);
+
+ clk_freq->cpsdvsr = (u8) (best_cpsdvsr & 0xFF);
+ clk_freq->scr = (u8) (best_scr & 0xFF);
+ dev_dbg(&pl022->adev->dev,
+ "SSP Target Frequency is: %u, Effective Frequency is %u\n",
+ freq, best_freq);
+ dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n",
+ clk_freq->cpsdvsr, clk_freq->scr);
+
+ return 0;
+}
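+
+/*
+ * Continuing the hypothetical 48 MHz example: a request for 10 MHz
+ * cannot be met exactly, so the search above settles on cpsdvsr = 2,
+ * scr = 2, i.e. 48000000 / (2 * 3) = 8 MHz, the closest rate that
+ * does not exceed the request.
+ */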
+
+/*
+ * A piece of default chip info unless the platform
+ * supplies it.
+ */
+static const struct pl022_config_chip pl022_default_chip_info = {
+ .com_mode = POLLING_TRANSFER,
+ .iface = SSP_INTERFACE_MOTOROLA_SPI,
+ .hierarchy = SSP_SLAVE,
+ .slave_tx_disable = DO_NOT_DRIVE_TX,
+ .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
+ .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
+ .ctrl_len = SSP_BITS_8,
+ .wait_state = SSP_MWIRE_WAIT_ZERO,
+ .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
+ .cs_control = null_cs_control,
+};
+
+/**
+ * pl022_setup - setup function registered to SPI master framework
+ * @spi: spi device which is requesting setup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. The first time setup is called for a device, this function
+ * initializes the runtime state for the chip and saves it in the device
+ * structure; on later calls it updates the runtime info with the new chip
+ * info. Nothing is actually written to the controller hardware here; that
+ * does not happen until the actual transfers commence.
+ */
+static int pl022_setup(struct spi_device *spi)
+{
+ struct pl022_config_chip const *chip_info;
+ struct pl022_config_chip chip_info_dt;
+ struct chip_data *chip;
+ struct ssp_clock_params clk_freq = { .cpsdvsr = 0, .scr = 0};
+ int status = 0;
+ struct pl022 *pl022 = spi_master_get_devdata(spi->master);
+ unsigned int bits = spi->bits_per_word;
+ u32 tmp;
+ struct device_node *np = spi->dev.of_node;
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ /* Get controller_state if one is supplied */
+ chip = spi_get_ctldata(spi);
+
+ if (chip == NULL) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+ dev_dbg(&spi->dev,
+ "allocated memory for controller's runtime state\n");
+ }
+
+ /* Get controller data if one is supplied */
+ chip_info = spi->controller_data;
+
+ if (chip_info == NULL) {
+ if (np) {
+ chip_info_dt = pl022_default_chip_info;
+
+ chip_info_dt.hierarchy = SSP_MASTER;
+ of_property_read_u32(np, "pl022,interface",
+ &chip_info_dt.iface);
+ of_property_read_u32(np, "pl022,com-mode",
+ &chip_info_dt.com_mode);
+ of_property_read_u32(np, "pl022,rx-level-trig",
+ &chip_info_dt.rx_lev_trig);
+ of_property_read_u32(np, "pl022,tx-level-trig",
+ &chip_info_dt.tx_lev_trig);
+ of_property_read_u32(np, "pl022,ctrl-len",
+ &chip_info_dt.ctrl_len);
+ of_property_read_u32(np, "pl022,wait-state",
+ &chip_info_dt.wait_state);
+ of_property_read_u32(np, "pl022,duplex",
+ &chip_info_dt.duplex);
+
+ chip_info = &chip_info_dt;
+ } else {
+ chip_info = &pl022_default_chip_info;
+ /* spi_board_info.controller_data is not supplied */
+ dev_dbg(&spi->dev,
+ "using default controller_data settings\n");
+ }
+ } else
+ dev_dbg(&spi->dev,
+ "using user supplied controller_data settings\n");
+
+ /*
+ * Custom divisors, when supplied, override the board frequency
+ * setting; otherwise the divisors are calculated from it
+ */
+ if ((0 == chip_info->clk_freq.cpsdvsr)
+ && (0 == chip_info->clk_freq.scr)) {
+ status = calculate_effective_freq(pl022,
+ spi->max_speed_hz,
+ &clk_freq);
+ if (status < 0)
+ goto err_config_params;
+ } else {
+ memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq));
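+ /* The PL022 requires an even cpsdvsr in the range 2..254 */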
+ if ((clk_freq.cpsdvsr % 2) != 0)
+ clk_freq.cpsdvsr--;
+ }
+ if ((clk_freq.cpsdvsr < CPSDVR_MIN)
+ || (clk_freq.cpsdvsr > CPSDVR_MAX)) {
+ status = -EINVAL;
+ dev_err(&spi->dev,
+ "cpsdvsr is configured incorrectly\n");
+ goto err_config_params;
+ }
+
+ status = verify_controller_parameters(pl022, chip_info);
+ if (status) {
+ dev_err(&spi->dev, "controller data is incorrect\n");
+ goto err_config_params;
+ }
+
+ pl022->rx_lev_trig = chip_info->rx_lev_trig;
+ pl022->tx_lev_trig = chip_info->tx_lev_trig;
+
+ /* Now set controller state based on controller data */
+ chip->xfer_type = chip_info->com_mode;
+ if (!chip_info->cs_control) {
+ chip->cs_control = null_cs_control;
+ if (!gpio_is_valid(pl022->chipselects[spi->chip_select]))
+ dev_warn(&spi->dev,
+ "invalid chip select\n");
+ } else
+ chip->cs_control = chip_info->cs_control;
+
+ /* Check bits per word with vendor specific range */
+ if ((bits <= 3) || (bits > pl022->vendor->max_bpw)) {
+ status = -ENOTSUPP;
+ dev_err(&spi->dev, "illegal data size for this controller!\n");
+ dev_err(&spi->dev, "This controller can only handle 4 <= n <= %d bit words\n",
+ pl022->vendor->max_bpw);
+ goto err_config_params;
+ } else if (bits <= 8) {
+ dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n");
+ chip->n_bytes = 1;
+ chip->read = READING_U8;
+ chip->write = WRITING_U8;
+ } else if (bits <= 16) {
+ dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n");
+ chip->n_bytes = 2;
+ chip->read = READING_U16;
+ chip->write = WRITING_U16;
+ } else {
+ dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n");
+ chip->n_bytes = 4;
+ chip->read = READING_U32;
+ chip->write = WRITING_U32;
+ }
+
+ /* Now Initialize all register settings required for this chip */
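+ /*
+ * SSP_WRITE_BITS(reg, val, mask, sb), defined earlier in this
+ * driver, is assumed here to clear 'mask' in 'reg' and OR in
+ * '(val << sb) & mask', i.e. a masked register-field insert.
+ */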
+ chip->cr0 = 0;
+ chip->cr1 = 0;
+ chip->dmacr = 0;
+ chip->cpsr = 0;
+ if ((chip_info->com_mode == DMA_TRANSFER)
+ && ((pl022->master_info)->enable_dma)) {
+ chip->enable_dma = true;
+ dev_dbg(&spi->dev, "DMA mode set in controller state\n");
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
+ SSP_DMACR_MASK_RXDMAE, 0);
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED,
+ SSP_DMACR_MASK_TXDMAE, 1);
+ } else {
+ chip->enable_dma = false;
+ dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n");
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
+ SSP_DMACR_MASK_RXDMAE, 0);
+ SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED,
+ SSP_DMACR_MASK_TXDMAE, 1);
+ }
+
+ chip->cpsr = clk_freq.cpsdvsr;
+
+ /* Special setup for the ST micro extended control registers */
+ if (pl022->vendor->extended_cr) {
+ u32 etx;
+
+ if (pl022->vendor->pl023) {
+ /* These bits are only in the PL023 */
+ SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay,
+ SSP_CR1_MASK_FBCLKDEL_ST, 13);
+ } else {
+ /* These bits are in the PL022 but not PL023 */
+ SSP_WRITE_BITS(chip->cr0, chip_info->duplex,
+ SSP_CR0_MASK_HALFDUP_ST, 5);
+ SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len,
+ SSP_CR0_MASK_CSS_ST, 16);
+ SSP_WRITE_BITS(chip->cr0, chip_info->iface,
+ SSP_CR0_MASK_FRF_ST, 21);
+ SSP_WRITE_BITS(chip->cr1, chip_info->wait_state,
+ SSP_CR1_MASK_MWAIT_ST, 6);
+ }
+ SSP_WRITE_BITS(chip->cr0, bits - 1,
+ SSP_CR0_MASK_DSS_ST, 0);
+
+ if (spi->mode & SPI_LSB_FIRST) {
+ tmp = SSP_RX_LSB;
+ etx = SSP_TX_LSB;
+ } else {
+ tmp = SSP_RX_MSB;
+ etx = SSP_TX_MSB;
+ }
+ SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4);
+ SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5);
+ SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig,
+ SSP_CR1_MASK_RXIFLSEL_ST, 7);
+ SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig,
+ SSP_CR1_MASK_TXIFLSEL_ST, 10);
+ } else {
+ SSP_WRITE_BITS(chip->cr0, bits - 1,
+ SSP_CR0_MASK_DSS, 0);
+ SSP_WRITE_BITS(chip->cr0, chip_info->iface,
+ SSP_CR0_MASK_FRF, 4);
+ }
+
+ /* Stuff that is common for all versions */
+ if (spi->mode & SPI_CPOL)
+ tmp = SSP_CLK_POL_IDLE_HIGH;
+ else
+ tmp = SSP_CLK_POL_IDLE_LOW;
+ SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6);
+
+ if (spi->mode & SPI_CPHA)
+ tmp = SSP_CLK_SECOND_EDGE;
+ else
+ tmp = SSP_CLK_FIRST_EDGE;
+ SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7);
+
+ SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8);
+ /* Loopback is available on all versions except PL023 */
+ if (pl022->vendor->loopback) {
+ if (spi->mode & SPI_LOOP)
+ tmp = LOOPBACK_ENABLED;
+ else
+ tmp = LOOPBACK_DISABLED;
+ SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0);
+ }
+ SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1);
+ SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2);
+ SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD,
+ 3);
+
+ /* Save controller_state */
+ spi_set_ctldata(spi, chip);
+ return status;
+ err_config_params:
+ spi_set_ctldata(spi, NULL);
+ kfree(chip);
+ return status;
+}
+
+/**
+ * pl022_cleanup - cleanup function registered to SPI master framework
+ * @spi: spi device which is requesting cleanup
+ *
+ * This function is registered to the SPI framework for this SPI master
+ * controller. It will free the runtime state of chip.
+ */
+static void pl022_cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+
+ spi_set_ctldata(spi, NULL);
+ kfree(chip);
+}
+
+static struct pl022_ssp_controller *
+pl022_platform_data_dt_get(struct device *dev)
+{
+ struct device_node *np = dev->of_node;
+ struct pl022_ssp_controller *pd;
+ u32 tmp = 0;
+
+ if (!np) {
+ dev_err(dev, "no dt node defined\n");
+ return NULL;
+ }
+
+ pd = devm_kzalloc(dev, sizeof(struct pl022_ssp_controller), GFP_KERNEL);
+ if (!pd)
+ return NULL;
+
+ pd->bus_id = -1;
+ pd->enable_dma = 1;
+ of_property_read_u32(np, "num-cs", &tmp);
+ pd->num_chipselect = tmp;
+ of_property_read_u32(np, "pl022,autosuspend-delay",
+ &pd->autosuspend_delay);
+ pd->rt = of_property_read_bool(np, "pl022,rt");
+
+ return pd;
+}
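+
+/*
+ * An illustrative DT node consumed by the lookup above (the unit
+ * address and property values are examples only):
+ *
+ *	spi@80002000 {
+ *		compatible = "arm,pl022", "arm,primecell";
+ *		reg = <0x80002000 0x1000>;
+ *		num-cs = <1>;
+ *		pl022,autosuspend-delay = <100>;
+ *		pl022,rt;
+ *	};
+ */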
+
+static int pl022_probe(struct amba_device *adev, const struct amba_id *id)
+{
+ struct device *dev = &adev->dev;
+ struct pl022_ssp_controller *platform_info =
+ dev_get_platdata(&adev->dev);
+ struct spi_master *master;
+ struct pl022 *pl022 = NULL; /*Data for this driver */
+ struct device_node *np = adev->dev.of_node;
+ int status = 0, i, num_cs;
+
+ dev_info(&adev->dev,
+ "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid);
+ if (!platform_info && IS_ENABLED(CONFIG_OF))
+ platform_info = pl022_platform_data_dt_get(dev);
+
+ if (!platform_info) {
+ dev_err(dev, "probe: no platform data defined\n");
+ return -ENODEV;
+ }
+
+ if (platform_info->num_chipselect) {
+ num_cs = platform_info->num_chipselect;
+ } else {
+ dev_err(dev, "probe: no chip select defined\n");
+ return -ENODEV;
+ }
+
+ /* Allocate master with space for data */
+ master = spi_alloc_master(dev, sizeof(struct pl022));
+ if (master == NULL) {
+ dev_err(&adev->dev, "probe - cannot alloc SPI master\n");
+ return -ENOMEM;
+ }
+
+ pl022 = spi_master_get_devdata(master);
+ pl022->master = master;
+ pl022->master_info = platform_info;
+ pl022->adev = adev;
+ pl022->vendor = id->data;
+ pl022->chipselects = devm_kzalloc(dev, num_cs * sizeof(int),
+ GFP_KERNEL);
+ if (!pl022->chipselects) {
+ status = -ENOMEM;
+ goto err_no_mem;
+ }
+
+ /*
+ * Bus number which has been assigned to this SSP controller
+ * on this board
+ */
+ master->bus_num = platform_info->bus_id;
+ master->num_chipselect = num_cs;
+ master->cleanup = pl022_cleanup;
+ master->setup = pl022_setup;
+ master->auto_runtime_pm = true;
+ master->transfer_one_message = pl022_transfer_one_message;
+ master->unprepare_transfer_hardware = pl022_unprepare_transfer_hardware;
+ master->rt = platform_info->rt;
+ master->dev.of_node = dev->of_node;
+
+ if (platform_info->num_chipselect && platform_info->chipselects) {
+ for (i = 0; i < num_cs; i++)
+ pl022->chipselects[i] = platform_info->chipselects[i];
+ } else if (pl022->vendor->internal_cs_ctrl) {
+ for (i = 0; i < num_cs; i++)
+ pl022->chipselects[i] = i;
+ } else if (IS_ENABLED(CONFIG_OF)) {
+ for (i = 0; i < num_cs; i++) {
+ int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
+
+ if (cs_gpio == -EPROBE_DEFER) {
+ status = -EPROBE_DEFER;
+ goto err_no_gpio;
+ }
+
+ pl022->chipselects[i] = cs_gpio;
+
+ if (gpio_is_valid(cs_gpio)) {
+ if (devm_gpio_request(dev, cs_gpio, "ssp-pl022"))
+ dev_err(&adev->dev,
+ "could not request %d gpio\n",
+ cs_gpio);
+ else if (gpio_direction_output(cs_gpio, 1))
+ dev_err(&adev->dev,
+ "could not set gpio %d as output\n",
+ cs_gpio);
+ }
+ }
+ }
+
+ /*
+ * Supports mode 0-3, loopback, and active low CS. Transfers are
+ * always MS bit first on the original pl022.
+ */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ if (pl022->vendor->extended_cr)
+ master->mode_bits |= SPI_LSB_FIRST;
+
+ dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num);
+
+ status = amba_request_regions(adev, NULL);
+ if (status)
+ goto err_no_ioregion;
+
+ pl022->phybase = adev->res.start;
+ pl022->virtbase = devm_ioremap(dev, adev->res.start,
+ resource_size(&adev->res));
+ if (pl022->virtbase == NULL) {
+ status = -ENOMEM;
+ goto err_no_ioremap;
+ }
+ dev_info(&adev->dev, "mapped registers from %pa to %p\n",
+ &adev->res.start, pl022->virtbase);
+
+ pl022->clk = devm_clk_get(&adev->dev, NULL);
+ if (IS_ERR(pl022->clk)) {
+ status = PTR_ERR(pl022->clk);
+ dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n");
+ goto err_no_clk;
+ }
+
+ status = clk_prepare_enable(pl022->clk);
+ if (status) {
+ dev_err(&adev->dev, "could not enable SSP/SPI bus clock\n");
+ goto err_no_clk_en;
+ }
+
+ /* Initialize transfer pump */
+ tasklet_init(&pl022->pump_transfers, pump_transfers,
+ (unsigned long)pl022);
+
+ /* Disable SSP */
+ writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)),
+ SSP_CR1(pl022->virtbase));
+ load_ssp_default_config(pl022);
+
+ status = devm_request_irq(dev, adev->irq[0], pl022_interrupt_handler,
+ 0, "pl022", pl022);
+ if (status < 0) {
+ dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status);
+ goto err_no_irq;
+ }
+
+ /* Get DMA channels, try autoconfiguration first */
+ status = pl022_dma_autoprobe(pl022);
+
+ /* If that failed, use channels from platform_info */
+ if (status == 0)
+ platform_info->enable_dma = 1;
+ else if (platform_info->enable_dma) {
+ status = pl022_dma_probe(pl022);
+ if (status != 0)
+ platform_info->enable_dma = 0;
+ }
+
+ /* Register with the SPI framework */
+ amba_set_drvdata(adev, pl022);
+ status = devm_spi_register_master(&adev->dev, master);
+ if (status != 0) {
+ dev_err(&adev->dev,
+ "probe - problem registering spi master\n");
+ goto err_spi_register;
+ }
+ dev_dbg(dev, "probe succeeded\n");
+
+ /* Let runtime PM suspend the device when idle */
+ if (platform_info->autosuspend_delay > 0) {
+ dev_info(&adev->dev,
+ "will use autosuspend for runtime pm, delay %dms\n",
+ platform_info->autosuspend_delay);
+ pm_runtime_set_autosuspend_delay(dev,
+ platform_info->autosuspend_delay);
+ pm_runtime_use_autosuspend(dev);
+ }
+ pm_runtime_put(dev);
+
+ return 0;
+
+ err_spi_register:
+ if (platform_info->enable_dma)
+ pl022_dma_remove(pl022);
+ err_no_irq:
+ clk_disable_unprepare(pl022->clk);
+ err_no_clk_en:
+ err_no_clk:
+ err_no_ioremap:
+ amba_release_regions(adev);
+ err_no_ioregion:
+ err_no_gpio:
+ err_no_mem:
+ spi_master_put(master);
+ return status;
+}
+
+static int
+pl022_remove(struct amba_device *adev)
+{
+ struct pl022 *pl022 = amba_get_drvdata(adev);
+
+ if (!pl022)
+ return 0;
+
+ /*
+ * Undo the pm_runtime_put() in probe. We assume that we're not
+ * accessing the primecell here.
+ */
+ pm_runtime_get_noresume(&adev->dev);
+
+ load_ssp_default_config(pl022);
+ if (pl022->master_info->enable_dma)
+ pl022_dma_remove(pl022);
+
+ clk_disable_unprepare(pl022->clk);
+ amba_release_regions(adev);
+ tasklet_disable(&pl022->pump_transfers);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pl022_suspend(struct device *dev)
+{
+ struct pl022 *pl022 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(pl022->master);
+ if (ret) {
+ dev_warn(dev, "cannot suspend master\n");
+ return ret;
+ }
+
+ ret = pm_runtime_force_suspend(dev);
+ if (ret) {
+ spi_master_resume(pl022->master);
+ return ret;
+ }
+
+ pinctrl_pm_select_sleep_state(dev);
+
+ dev_dbg(dev, "suspended\n");
+ return 0;
+}
+
+static int pl022_resume(struct device *dev)
+{
+ struct pl022 *pl022 = dev_get_drvdata(dev);
+ int ret;
+
+ ret = pm_runtime_force_resume(dev);
+ if (ret)
+ dev_err(dev, "problem resuming\n");
+
+ /* Start the queue running */
+ ret = spi_master_resume(pl022->master);
+ if (ret)
+ dev_err(dev, "problem starting queue (%d)\n", ret);
+ else
+ dev_dbg(dev, "resumed\n");
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int pl022_runtime_suspend(struct device *dev)
+{
+ struct pl022 *pl022 = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(pl022->clk);
+ pinctrl_pm_select_idle_state(dev);
+
+ return 0;
+}
+
+static int pl022_runtime_resume(struct device *dev)
+{
+ struct pl022 *pl022 = dev_get_drvdata(dev);
+
+ pinctrl_pm_select_default_state(dev);
+ clk_prepare_enable(pl022->clk);
+
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops pl022_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pl022_suspend, pl022_resume)
+ SET_RUNTIME_PM_OPS(pl022_runtime_suspend, pl022_runtime_resume, NULL)
+};
+
+static struct vendor_data vendor_arm = {
+ .fifodepth = 8,
+ .max_bpw = 16,
+ .unidir = false,
+ .extended_cr = false,
+ .pl023 = false,
+ .loopback = true,
+ .internal_cs_ctrl = false,
+};
+
+static struct vendor_data vendor_st = {
+ .fifodepth = 32,
+ .max_bpw = 32,
+ .unidir = false,
+ .extended_cr = true,
+ .pl023 = false,
+ .loopback = true,
+ .internal_cs_ctrl = false,
+};
+
+static struct vendor_data vendor_st_pl023 = {
+ .fifodepth = 32,
+ .max_bpw = 32,
+ .unidir = false,
+ .extended_cr = true,
+ .pl023 = true,
+ .loopback = false,
+ .internal_cs_ctrl = false,
+};
+
+static struct vendor_data vendor_lsi = {
+ .fifodepth = 8,
+ .max_bpw = 16,
+ .unidir = false,
+ .extended_cr = false,
+ .pl023 = false,
+ .loopback = true,
+ .internal_cs_ctrl = true,
+};
+
+static struct amba_id pl022_ids[] = {
+ {
+ /*
+ * ARM PL022 variant, this has a 16bit wide
+ * and 8 locations deep TX/RX FIFO
+ */
+ .id = 0x00041022,
+ .mask = 0x000fffff,
+ .data = &vendor_arm,
+ },
+ {
+ /*
+ * ST Micro derivative, this has 32bit wide
+ * and 32 locations deep TX/RX FIFO
+ */
+ .id = 0x01080022,
+ .mask = 0xffffffff,
+ .data = &vendor_st,
+ },
+ {
+ /*
+ * ST-Ericsson derivative "PL023" (this is not
+ * an official ARM number), this is a PL022 SSP block
+ * stripped to SPI mode only, it has 32bit wide
+ * and 32 locations deep TX/RX FIFO but no extended
+ * CR0/CR1 register
+ */
+ .id = 0x00080023,
+ .mask = 0xffffffff,
+ .data = &vendor_st_pl023,
+ },
+ {
+ /*
+ * PL022 variant that has a chip select control register which
+ * allows control of 5 output signals nCS[0:4].
+ */
+ .id = 0x000b6022,
+ .mask = 0x000fffff,
+ .data = &vendor_lsi,
+ },
+ { 0, 0 },
+};
+
+MODULE_DEVICE_TABLE(amba, pl022_ids);
+
+static struct amba_driver pl022_driver = {
+ .drv = {
+ .name = "ssp-pl022",
+ .pm = &pl022_dev_pm_ops,
+ },
+ .id_table = pl022_ids,
+ .probe = pl022_probe,
+ .remove = pl022_remove,
+};
+
+static int __init pl022_init(void)
+{
+ return amba_driver_register(&pl022_driver);
+}
+subsys_initcall(pl022_init);
+
+static void __exit pl022_exit(void)
+{
+ amba_driver_unregister(&pl022_driver);
+}
+module_exit(pl022_exit);
+
+MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
+MODULE_DESCRIPTION("PL022 SSP Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-ppc4xx.c b/drivers/spi/spi-ppc4xx.c
new file mode 100644
index 000000000..54fb984a3
--- /dev/null
+++ b/drivers/spi/spi-ppc4xx.c
@@ -0,0 +1,585 @@
+/*
+ * SPI_PPC4XX SPI controller driver.
+ *
+ * Copyright (C) 2007 Gary Jennejohn <garyj@denx.de>
+ * Copyright 2008 Stefan Roese <sr@denx.de>, DENX Software Engineering
+ * Copyright 2009 Harris Corporation, Steven A. Falco <sfalco@harris.com>
+ *
+ * Based in part on drivers/spi/spi_s3c24xx.c
+ *
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published
+ * by the Free Software Foundation.
+ */
+
+/*
+ * The PPC4xx SPI controller has no FIFO so each sent/received byte will
+ * generate an interrupt to the CPU. This can cause high CPU utilization.
+ * This driver allows platforms to reduce the interrupt load on the CPU
+ * during SPI transfers by setting max_speed_hz via the device tree.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#include <asm/io.h>
+#include <asm/dcr.h>
+#include <asm/dcr-regs.h>
+
+/* bits in mode register - bit 0 is MSb */
+
+/*
+ * SPI_PPC4XX_MODE_SCP = 0 means "data latched on trailing edge of clock"
+ * SPI_PPC4XX_MODE_SCP = 1 means "data latched on leading edge of clock"
+ * Note: This is the inverse of CPHA.
+ */
+#define SPI_PPC4XX_MODE_SCP (0x80 >> 3)
+
+/* SPI_PPC4XX_MODE_SPE = 1 means "port enabled" */
+#define SPI_PPC4XX_MODE_SPE (0x80 >> 4)
+
+/*
+ * SPI_PPC4XX_MODE_RD = 0 means "MSB first" - this is the normal mode
+ * SPI_PPC4XX_MODE_RD = 1 means "LSB first" - this is bit-reversed mode
+ * Note: This is identical to SPI_LSB_FIRST.
+ */
+#define SPI_PPC4XX_MODE_RD (0x80 >> 5)
+
+/*
+ * SPI_PPC4XX_MODE_CI = 0 means "clock idles low"
+ * SPI_PPC4XX_MODE_CI = 1 means "clock idles high"
+ * Note: This is identical to CPOL.
+ */
+#define SPI_PPC4XX_MODE_CI (0x80 >> 6)
+
+/*
+ * SPI_PPC4XX_MODE_IL = 0 means "loopback disable"
+ * SPI_PPC4XX_MODE_IL = 1 means "loopback enable"
+ */
+#define SPI_PPC4XX_MODE_IL (0x80 >> 7)
+
+/* bits in control register */
+/* starts a transfer when set */
+#define SPI_PPC4XX_CR_STR (0x80 >> 7)
+
+/* bits in status register */
+/* port is busy with a transfer */
+#define SPI_PPC4XX_SR_BSY (0x80 >> 6)
+/* RxD ready */
+#define SPI_PPC4XX_SR_RBR (0x80 >> 7)
+
+/* clock settings (SCP and CI) for various SPI modes */
+#define SPI_CLK_MODE0 (SPI_PPC4XX_MODE_SCP | 0)
+#define SPI_CLK_MODE1 (0 | 0)
+#define SPI_CLK_MODE2 (SPI_PPC4XX_MODE_SCP | SPI_PPC4XX_MODE_CI)
+#define SPI_CLK_MODE3 (0 | SPI_PPC4XX_MODE_CI)
+
+#define DRIVER_NAME "spi_ppc4xx_of"
+
+struct spi_ppc4xx_regs {
+ u8 mode;
+ u8 rxd;
+ u8 txd;
+ u8 cr;
+ u8 sr;
+ u8 dummy;
+ /*
+ * Clock divisor modulus register
+ * This uses the following formula:
+ * SCPClkOut = OPBCLK/(4(CDM + 1))
+ * or
+ * CDM = OPBCLK/(4 * SCPClkOut) - 1
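+ * e.g. (illustrative values) OPBCLK = 64 MHz and a target
+ * SCPClkOut of 1 MHz give CDM = 64000000/(4 * 1000000) - 1 = 15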
+ * bit 0 is the MSb!
+ */
+ u8 cdm;
+};
+
+/* SPI Controller driver's private data. */
+struct ppc4xx_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ u64 mapbase;
+ u64 mapsize;
+ int irqnum;
+ /* need this to set the SPI clock */
+ unsigned int opb_freq;
+
+ /* for transfers */
+ int len;
+ int count;
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+
+ int *gpios;
+
+ struct spi_ppc4xx_regs __iomem *regs; /* pointer to the registers */
+ struct spi_master *master;
+ struct device *dev;
+};
+
+/* need this so we can set the clock in the chipselect routine */
+struct spi_ppc4xx_cs {
+ u8 mode;
+};
+
+static int spi_ppc4xx_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct ppc4xx_spi *hw;
+ u8 data;
+
+ dev_dbg(&spi->dev, "txrx: tx %p, rx %p, len %d\n",
+ t->tx_buf, t->rx_buf, t->len);
+
+ hw = spi_master_get_devdata(spi->master);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->count = 0;
+
+ /* send the first byte */
+ data = hw->tx ? hw->tx[0] : 0;
+ out_8(&hw->regs->txd, data);
+ out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR);
+ wait_for_completion(&hw->done);
+
+ return hw->count;
+}
+
+static int spi_ppc4xx_setupxfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master);
+ struct spi_ppc4xx_cs *cs = spi->controller_state;
+ int scr;
+ u8 cdm = 0;
+ u32 speed;
+ u8 bits_per_word;
+
+ /* Start with the generic configuration for this device. */
+ bits_per_word = spi->bits_per_word;
+ speed = spi->max_speed_hz;
+
+ /*
+ * Modify the configuration if the transfer overrides it. Do not allow
+ * the transfer to overwrite the generic configuration with zeros.
+ */
+ if (t) {
+ if (t->bits_per_word)
+ bits_per_word = t->bits_per_word;
+
+ if (t->speed_hz)
+ speed = min(t->speed_hz, spi->max_speed_hz);
+ }
+
+ if (!speed || (speed > spi->max_speed_hz)) {
+ dev_err(&spi->dev, "invalid speed_hz (%d)\n", speed);
+ return -EINVAL;
+ }
+
+ /* Write new configuration */
+ out_8(&hw->regs->mode, cs->mode);
+
+ /* Set the clock */
+ /* opb_freq was already divided by 4 */
+ scr = (hw->opb_freq / speed) - 1;
+ if (scr > 0)
+ cdm = min(scr, 0xff);
+
+ dev_dbg(&spi->dev, "setting pre-scaler to %d (hz %d)\n", cdm, speed);
+
+ if (in_8(&hw->regs->cdm) != cdm)
+ out_8(&hw->regs->cdm, cdm);
+
+ spin_lock(&hw->bitbang.lock);
+ if (!hw->bitbang.busy) {
+ hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
+ /* Need to ndelay here? */
+ }
+ spin_unlock(&hw->bitbang.lock);
+
+ return 0;
+}
+
+static int spi_ppc4xx_setup(struct spi_device *spi)
+{
+ struct spi_ppc4xx_cs *cs = spi->controller_state;
+
+ if (!spi->max_speed_hz) {
+ dev_err(&spi->dev, "invalid max_speed_hz (must be non-zero)\n");
+ return -EINVAL;
+ }
+
+ if (cs == NULL) {
+ cs = kzalloc(sizeof *cs, GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+ spi->controller_state = cs;
+ }
+
+ /*
+ * We set all bits of the SPI0_MODE register, so there is
+ * no need to read-modify-write.
+ */
+ cs->mode = SPI_PPC4XX_MODE_SPE;
+
+ switch (spi->mode & (SPI_CPHA | SPI_CPOL)) {
+ case SPI_MODE_0:
+ cs->mode |= SPI_CLK_MODE0;
+ break;
+ case SPI_MODE_1:
+ cs->mode |= SPI_CLK_MODE1;
+ break;
+ case SPI_MODE_2:
+ cs->mode |= SPI_CLK_MODE2;
+ break;
+ case SPI_MODE_3:
+ cs->mode |= SPI_CLK_MODE3;
+ break;
+ }
+
+ if (spi->mode & SPI_LSB_FIRST)
+ cs->mode |= SPI_PPC4XX_MODE_RD;
+
+ return 0;
+}
+
+static void spi_ppc4xx_chipsel(struct spi_device *spi, int value)
+{
+ struct ppc4xx_spi *hw = spi_master_get_devdata(spi->master);
+ unsigned int cs = spi->chip_select;
+ unsigned int cspol;
+
+ /*
+ * If there are no chip selects at all, or if this is the special
+ * case of a non-existent (dummy) chip select, do nothing.
+ */
+
+ if (!hw->master->num_chipselect || hw->gpios[cs] == -EEXIST)
+ return;
+
+ cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
+ if (value == BITBANG_CS_INACTIVE)
+ cspol = !cspol;
+
+ gpio_set_value(hw->gpios[cs], cspol);
+}
+
+static irqreturn_t spi_ppc4xx_int(int irq, void *dev_id)
+{
+ struct ppc4xx_spi *hw;
+ u8 status;
+ u8 data;
+ unsigned int count;
+
+ hw = (struct ppc4xx_spi *)dev_id;
+
+ status = in_8(&hw->regs->sr);
+ if (!status)
+ return IRQ_NONE;
+
+ /*
+ * BSY de-asserts one cycle after the transfer is complete. The
+ * interrupt is asserted after the transfer is complete. The exact
+ * relationship is not documented, hence this code.
+ */
+
+ if (unlikely(status & SPI_PPC4XX_SR_BSY)) {
+ u8 lstatus;
+ int cnt = 0;
+
+ dev_dbg(hw->dev, "got interrupt but spi still busy?\n");
+ do {
+ ndelay(10);
+ lstatus = in_8(&hw->regs->sr);
+ } while (++cnt < 100 && lstatus & SPI_PPC4XX_SR_BSY);
+
+ if (cnt >= 100) {
+ dev_err(hw->dev, "busywait: too many loops!\n");
+ complete(&hw->done);
+ return IRQ_HANDLED;
+ } else {
+ /* status is always 1 (RBR) here */
+ status = in_8(&hw->regs->sr);
+ dev_dbg(hw->dev, "loops %d status %x\n", cnt, status);
+ }
+ }
+
+ count = hw->count;
+ hw->count++;
+
+ /* RBR triggered this interrupt. Therefore, data must be ready. */
+ data = in_8(&hw->regs->rxd);
+ if (hw->rx)
+ hw->rx[count] = data;
+
+ count++;
+
+ if (count < hw->len) {
+ data = hw->tx ? hw->tx[count] : 0;
+ out_8(&hw->regs->txd, data);
+ out_8(&hw->regs->cr, SPI_PPC4XX_CR_STR);
+ } else {
+ complete(&hw->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void spi_ppc4xx_cleanup(struct spi_device *spi)
+{
+ kfree(spi->controller_state);
+}
+
+static void spi_ppc4xx_enable(struct ppc4xx_spi *hw)
+{
+ /*
+ * On all 4xx PPCs the SPI bus is shared/multiplexed with
+ * the 2nd I2C bus. We need to enable the SPI bus before
+ * using it.
+ */
+
+ /* need to clear bit 14 to enable SPC */
+ dcri_clrset(SDR0, SDR0_PFC1, 0x80000000 >> 14, 0);
+}
+
+static void free_gpios(struct ppc4xx_spi *hw)
+{
+ if (hw->master->num_chipselect) {
+ int i;
+ for (i = 0; i < hw->master->num_chipselect; i++)
+ if (gpio_is_valid(hw->gpios[i]))
+ gpio_free(hw->gpios[i]);
+
+ kfree(hw->gpios);
+ hw->gpios = NULL;
+ }
+}
+
+/*
+ * platform_device layer stuff...
+ */
+static int spi_ppc4xx_of_probe(struct platform_device *op)
+{
+ struct ppc4xx_spi *hw;
+ struct spi_master *master;
+ struct spi_bitbang *bbp;
+ struct resource resource;
+ struct device_node *np = op->dev.of_node;
+ struct device *dev = &op->dev;
+ struct device_node *opbnp;
+ int ret;
+ int num_gpios;
+ const unsigned int *clk;
+
+ master = spi_alloc_master(dev, sizeof *hw);
+ if (master == NULL)
+ return -ENOMEM;
+ master->dev.of_node = np;
+ platform_set_drvdata(op, master);
+ hw = spi_master_get_devdata(master);
+ hw->master = master;
+ hw->dev = dev;
+
+ init_completion(&hw->done);
+
+ /*
+ * A count of zero implies a single SPI device without any chip-select.
+ * Note that of_gpio_count counts all GPIOs assigned to this SPI
+ * master. This includes both "null" GPIOs and real ones.
+ */
+ num_gpios = of_gpio_count(np);
+ if (num_gpios > 0) {
+ int i;
+
+ hw->gpios = kzalloc(sizeof(int) * num_gpios, GFP_KERNEL);
+ if (!hw->gpios) {
+ ret = -ENOMEM;
+ goto free_master;
+ }
+
+ for (i = 0; i < num_gpios; i++) {
+ int gpio;
+ enum of_gpio_flags flags;
+
+ gpio = of_get_gpio_flags(np, i, &flags);
+ hw->gpios[i] = gpio;
+
+ if (gpio_is_valid(gpio)) {
+ /* Real CS - set the initial state. */
+ ret = gpio_request(gpio, np->name);
+ if (ret < 0) {
+ dev_err(dev, "can't request gpio "
+ "#%d: %d\n", i, ret);
+ goto free_gpios;
+ }
+
+ gpio_direction_output(gpio,
+ !!(flags & OF_GPIO_ACTIVE_LOW));
+ } else if (gpio == -EEXIST) {
+ ; /* No CS, but that's OK. */
+ } else {
+ dev_err(dev, "invalid gpio #%d: %d\n", i, gpio);
+ ret = -EINVAL;
+ goto free_gpios;
+ }
+ }
+ }
+
+ /* Setup the state for the bitbang driver */
+ bbp = &hw->bitbang;
+ bbp->master = hw->master;
+ bbp->setup_transfer = spi_ppc4xx_setupxfer;
+ bbp->chipselect = spi_ppc4xx_chipsel;
+ bbp->txrx_bufs = spi_ppc4xx_txrx;
+ bbp->use_dma = 0;
+ bbp->master->setup = spi_ppc4xx_setup;
+ bbp->master->cleanup = spi_ppc4xx_cleanup;
+ bbp->master->bits_per_word_mask = SPI_BPW_MASK(8);
+
+ /* the spi->mode bits understood by this driver: */
+ bbp->master->mode_bits =
+ SPI_CPHA | SPI_CPOL | SPI_CS_HIGH | SPI_LSB_FIRST;
+
+ /* one chip select per GPIO pin found, if any */
+ bbp->master->num_chipselect = num_gpios > 0 ? num_gpios : 0;
+
+ /* Find the OPB bus node */
+ opbnp = of_find_compatible_node(NULL, NULL, "ibm,opb");
+ if (opbnp == NULL) {
+ dev_err(dev, "OPB: cannot find node\n");
+ ret = -ENODEV;
+ goto free_gpios;
+ }
+ /* Get the clock (Hz) for the OPB */
+ clk = of_get_property(opbnp, "clock-frequency", NULL);
+ if (clk == NULL) {
+ dev_err(dev, "OPB: no clock-frequency property set\n");
+ of_node_put(opbnp);
+ ret = -ENODEV;
+ goto free_gpios;
+ }
+ hw->opb_freq = *clk;
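+ /* Pre-divide by 4, per the SCPClkOut = OPBCLK/(4(CDM + 1)) formula */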
+ hw->opb_freq >>= 2;
+ of_node_put(opbnp);
+
+ ret = of_address_to_resource(np, 0, &resource);
+ if (ret) {
+ dev_err(dev, "error while parsing device node resource\n");
+ goto free_gpios;
+ }
+ hw->mapbase = resource.start;
+ hw->mapsize = resource_size(&resource);
+
+ /* Sanity check */
+ if (hw->mapsize < sizeof(struct spi_ppc4xx_regs)) {
+ dev_err(dev, "too small to map registers\n");
+ ret = -EINVAL;
+ goto free_gpios;
+ }
+
+ /* Request IRQ */
+ hw->irqnum = irq_of_parse_and_map(np, 0);
+ ret = request_irq(hw->irqnum, spi_ppc4xx_int,
+ 0, "spi_ppc4xx_of", (void *)hw);
+ if (ret) {
+ dev_err(dev, "unable to allocate interrupt\n");
+ goto free_gpios;
+ }
+
+ if (!request_mem_region(hw->mapbase, hw->mapsize, DRIVER_NAME)) {
+ dev_err(dev, "resource unavailable\n");
+ ret = -EBUSY;
+ goto request_mem_error;
+ }
+
+ hw->regs = ioremap(hw->mapbase, sizeof(struct spi_ppc4xx_regs));
+
+ if (!hw->regs) {
+ dev_err(dev, "unable to memory map registers\n");
+ ret = -ENXIO;
+ goto map_io_error;
+ }
+
+ spi_ppc4xx_enable(hw);
+
+ /* Finally register our spi controller */
+ dev->dma_mask = 0;
+ ret = spi_bitbang_start(bbp);
+ if (ret) {
+ dev_err(dev, "failed to register SPI master\n");
+ goto unmap_regs;
+ }
+
+ dev_info(dev, "driver initialized\n");
+
+ return 0;
+
+unmap_regs:
+ iounmap(hw->regs);
+map_io_error:
+ release_mem_region(hw->mapbase, hw->mapsize);
+request_mem_error:
+ free_irq(hw->irqnum, hw);
+free_gpios:
+ free_gpios(hw);
+free_master:
+ spi_master_put(master);
+
+ dev_err(dev, "initialization failed\n");
+ return ret;
+}
+
+static int spi_ppc4xx_of_remove(struct platform_device *op)
+{
+ struct spi_master *master = platform_get_drvdata(op);
+ struct ppc4xx_spi *hw = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&hw->bitbang);
+ release_mem_region(hw->mapbase, hw->mapsize);
+ free_irq(hw->irqnum, hw);
+ iounmap(hw->regs);
+ free_gpios(hw);
+ spi_master_put(master);
+ return 0;
+}
+
+static const struct of_device_id spi_ppc4xx_of_match[] = {
+ { .compatible = "ibm,ppc4xx-spi", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, spi_ppc4xx_of_match);
+
+static struct platform_driver spi_ppc4xx_of_driver = {
+ .probe = spi_ppc4xx_of_probe,
+ .remove = spi_ppc4xx_of_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .of_match_table = spi_ppc4xx_of_match,
+ },
+};
+module_platform_driver(spi_ppc4xx_of_driver);
+
+MODULE_AUTHOR("Gary Jennejohn & Stefan Roese");
+MODULE_DESCRIPTION("Simple PPC4xx SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-pxa2xx-dma.c b/drivers/spi/spi-pxa2xx-dma.c
new file mode 100644
index 000000000..66a173939
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-dma.c
@@ -0,0 +1,368 @@
+/*
+ * PXA2xx SPI DMA engine support.
+ *
+ * Copyright (C) 2013, Intel Corporation
+ * Author: Mika Westerberg <mika.westerberg@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
+
+#include "spi-pxa2xx.h"
+
+static int pxa2xx_spi_map_dma_buffer(struct driver_data *drv_data,
+ enum dma_data_direction dir)
+{
+ int i, nents, len = drv_data->len;
+ struct scatterlist *sg;
+ struct device *dmadev;
+ struct sg_table *sgt;
+ void *buf, *pbuf;
+
+ if (dir == DMA_TO_DEVICE) {
+ dmadev = drv_data->tx_chan->device->dev;
+ sgt = &drv_data->tx_sgt;
+ buf = drv_data->tx;
+ drv_data->tx_map_len = len;
+ } else {
+ dmadev = drv_data->rx_chan->device->dev;
+ sgt = &drv_data->rx_sgt;
+ buf = drv_data->rx;
+ drv_data->rx_map_len = len;
+ }
+
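+ /*
+ * Split the transfer into scatterlist entries of at most 2 KiB;
+ * e.g. a 5000-byte buffer maps to 2048 + 2048 + 904 bytes.
+ */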
+ nents = DIV_ROUND_UP(len, SZ_2K);
+ if (nents != sgt->nents) {
+ int ret;
+
+ sg_free_table(sgt);
+ ret = sg_alloc_table(sgt, nents, GFP_ATOMIC);
+ if (ret)
+ return ret;
+ }
+
+ pbuf = buf;
+ for_each_sg(sgt->sgl, sg, sgt->nents, i) {
+ size_t bytes = min_t(size_t, len, SZ_2K);
+
+ if (buf)
+ sg_set_buf(sg, pbuf, bytes);
+ else
+ sg_set_buf(sg, drv_data->dummy, bytes);
+
+ pbuf += bytes;
+ len -= bytes;
+ }
+
+ nents = dma_map_sg(dmadev, sgt->sgl, sgt->nents, dir);
+ if (!nents)
+ return -ENOMEM;
+
+ return nents;
+}
+
+static void pxa2xx_spi_unmap_dma_buffer(struct driver_data *drv_data,
+ enum dma_data_direction dir)
+{
+ struct device *dmadev;
+ struct sg_table *sgt;
+
+ if (dir == DMA_TO_DEVICE) {
+ dmadev = drv_data->tx_chan->device->dev;
+ sgt = &drv_data->tx_sgt;
+ } else {
+ dmadev = drv_data->rx_chan->device->dev;
+ sgt = &drv_data->rx_sgt;
+ }
+
+ dma_unmap_sg(dmadev, sgt->sgl, sgt->nents, dir);
+}
+
+static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
+{
+ if (!drv_data->dma_mapped)
+ return;
+
+ pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_FROM_DEVICE);
+ pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
+
+ drv_data->dma_mapped = 0;
+}
+
+static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data,
+ bool error)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+
+ /*
+ * It is possible that one CPU is handling ROR interrupt and other
+ * just gets DMA completion. Calling pump_transfers() twice for the
+ * same transfer leads to problems thus we prevent concurrent calls
+ * by using ->dma_running.
+ */
+ if (atomic_dec_and_test(&drv_data->dma_running)) {
+ /*
+ * If the other CPU is still handling the ROR interrupt we
+ * might not know about the error yet. So we re-check the
+ * ROR bit here before we clear the status register.
+ */
+ if (!error) {
+ u32 status = pxa2xx_spi_read(drv_data, SSSR)
+ & drv_data->mask_sr;
+ error = status & SSSR_ROR;
+ }
+
+ /* Clear status & disable interrupts */
+ pxa2xx_spi_write(drv_data, SSCR1,
+ pxa2xx_spi_read(drv_data, SSCR1)
+ & ~drv_data->dma_cr1);
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+
+ if (!error) {
+ pxa2xx_spi_unmap_dma_buffers(drv_data);
+
+ drv_data->tx += drv_data->tx_map_len;
+ drv_data->rx += drv_data->rx_map_len;
+
+ msg->actual_length += drv_data->len;
+ msg->state = pxa2xx_spi_next_transfer(drv_data);
+ } else {
+ /* In case we got an error we disable the SSP now */
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0)
+ & ~SSCR0_SSE);
+
+ msg->state = ERROR_STATE;
+ }
+
+ tasklet_schedule(&drv_data->pump_transfers);
+ }
+}
+
+static void pxa2xx_spi_dma_callback(void *data)
+{
+ pxa2xx_spi_dma_transfer_complete(data, false);
+}
+
+static struct dma_async_tx_descriptor *
+pxa2xx_spi_dma_prepare_one(struct driver_data *drv_data,
+ enum dma_transfer_direction dir)
+{
+ struct chip_data *chip = drv_data->cur_chip;
+ enum dma_slave_buswidth width;
+ struct dma_slave_config cfg;
+ struct dma_chan *chan;
+ struct sg_table *sgt;
+ int nents, ret;
+
+ switch (drv_data->n_bytes) {
+ case 1:
+ width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ break;
+ case 2:
+ width = DMA_SLAVE_BUSWIDTH_2_BYTES;
+ break;
+ default:
+ width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ break;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = dir;
+
+ if (dir == DMA_MEM_TO_DEV) {
+ cfg.dst_addr = drv_data->ssdr_physical;
+ cfg.dst_addr_width = width;
+ cfg.dst_maxburst = chip->dma_burst_size;
+
+ sgt = &drv_data->tx_sgt;
+ nents = drv_data->tx_nents;
+ chan = drv_data->tx_chan;
+ } else {
+ cfg.src_addr = drv_data->ssdr_physical;
+ cfg.src_addr_width = width;
+ cfg.src_maxburst = chip->dma_burst_size;
+
+ sgt = &drv_data->rx_sgt;
+ nents = drv_data->rx_nents;
+ chan = drv_data->rx_chan;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_warn(&drv_data->pdev->dev, "DMA slave config failed\n");
+ return NULL;
+ }
+
+ return dmaengine_prep_slave_sg(chan, sgt->sgl, nents, dir,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+}
+
+bool pxa2xx_spi_dma_is_possible(size_t len)
+{
+ return len <= MAX_DMA_LEN;
+}
+
+int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
+{
+ const struct chip_data *chip = drv_data->cur_chip;
+ int ret;
+
+ if (!chip->enable_dma)
+ return 0;
+
+ /* Don't bother with DMA if we can't do even a single burst */
+ if (drv_data->len < chip->dma_burst_size)
+ return 0;
+
+ ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_TO_DEVICE);
+ if (ret <= 0) {
+ dev_warn(&drv_data->pdev->dev, "failed to DMA map TX\n");
+ return 0;
+ }
+
+ drv_data->tx_nents = ret;
+
+ ret = pxa2xx_spi_map_dma_buffer(drv_data, DMA_FROM_DEVICE);
+ if (ret <= 0) {
+ pxa2xx_spi_unmap_dma_buffer(drv_data, DMA_TO_DEVICE);
+ dev_warn(&drv_data->pdev->dev, "failed to DMA map RX\n");
+ return 0;
+ }
+
+ drv_data->rx_nents = ret;
+ return 1;
+}
+
+irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
+{
+ u32 status;
+
+ status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
+ if (status & SSSR_ROR) {
+ dev_err(&drv_data->pdev->dev, "FIFO overrun\n");
+
+ dmaengine_terminate_all(drv_data->rx_chan);
+ dmaengine_terminate_all(drv_data->tx_chan);
+
+ pxa2xx_spi_dma_transfer_complete(drv_data, true);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
+{
+ struct dma_async_tx_descriptor *tx_desc, *rx_desc;
+
+ tx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_MEM_TO_DEV);
+ if (!tx_desc) {
+ dev_err(&drv_data->pdev->dev,
+ "failed to get DMA TX descriptor\n");
+ return -EBUSY;
+ }
+
+ rx_desc = pxa2xx_spi_dma_prepare_one(drv_data, DMA_DEV_TO_MEM);
+ if (!rx_desc) {
+ dev_err(&drv_data->pdev->dev,
+ "failed to get DMA RX descriptor\n");
+ return -EBUSY;
+ }
+
+ /* We are ready when RX completes */
+ rx_desc->callback = pxa2xx_spi_dma_callback;
+ rx_desc->callback_param = drv_data;
+
+ dmaengine_submit(rx_desc);
+ dmaengine_submit(tx_desc);
+ return 0;
+}
+
+void pxa2xx_spi_dma_start(struct driver_data *drv_data)
+{
+ dma_async_issue_pending(drv_data->rx_chan);
+ dma_async_issue_pending(drv_data->tx_chan);
+
+ atomic_set(&drv_data->dma_running, 1);
+}
+
+int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
+{
+ struct pxa2xx_spi_master *pdata = drv_data->master_info;
+ struct device *dev = &drv_data->pdev->dev;
+ dma_cap_mask_t mask;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
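+ /*
+ * Scratch buffer: every 2 KiB scatterlist chunk of an absent tx
+ * or rx buffer is pointed at this area instead (see
+ * pxa2xx_spi_map_dma_buffer() above).
+ */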
+ drv_data->dummy = devm_kzalloc(dev, SZ_2K, GFP_KERNEL);
+ if (!drv_data->dummy)
+ return -ENOMEM;
+
+ drv_data->tx_chan = dma_request_slave_channel_compat(mask,
+ pdata->dma_filter, pdata->tx_param, dev, "tx");
+ if (!drv_data->tx_chan)
+ return -ENODEV;
+
+ drv_data->rx_chan = dma_request_slave_channel_compat(mask,
+ pdata->dma_filter, pdata->rx_param, dev, "rx");
+ if (!drv_data->rx_chan) {
+ dma_release_channel(drv_data->tx_chan);
+ drv_data->tx_chan = NULL;
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+void pxa2xx_spi_dma_release(struct driver_data *drv_data)
+{
+ if (drv_data->rx_chan) {
+ dmaengine_terminate_all(drv_data->rx_chan);
+ dma_release_channel(drv_data->rx_chan);
+ sg_free_table(&drv_data->rx_sgt);
+ drv_data->rx_chan = NULL;
+ }
+ if (drv_data->tx_chan) {
+ dmaengine_terminate_all(drv_data->tx_chan);
+ dma_release_channel(drv_data->tx_chan);
+ sg_free_table(&drv_data->tx_sgt);
+ drv_data->tx_chan = NULL;
+ }
+}
+
+void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
+{
+}
+
+int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word, u32 *burst_code,
+ u32 *threshold)
+{
+ struct pxa2xx_spi_chip *chip_info = spi->controller_data;
+
+ /*
+ * If the DMA burst size is given in chip_info we use that,
+ * otherwise we use the default. Also we use the default FIFO
+ * thresholds for now.
+ */
+ *burst_code = chip_info ? chip_info->dma_burst_size : 1;
+ *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
+ | SSCR1_TxTresh(TX_THRESH_DFLT);
+
+ return 0;
+}
diff --git a/drivers/spi/spi-pxa2xx-pci.c b/drivers/spi/spi-pxa2xx-pci.c
new file mode 100644
index 000000000..fa7399e84
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-pci.c
@@ -0,0 +1,221 @@
+/*
+ * CE4100's SPI device is more or less the same one as found on PXA.
+ */
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/clk.h>
+#include <linux/clk-provider.h>
+
+#include <linux/dmaengine.h>
+#include <linux/platform_data/dma-dw.h>
+
+enum {
+ PORT_CE4100,
+ PORT_BYT,
+ PORT_BSW0,
+ PORT_BSW1,
+ PORT_BSW2,
+ PORT_QUARK_X1000,
+};
+
+struct pxa_spi_info {
+ enum pxa_ssp_type type;
+ int port_id;
+ int num_chipselect;
+ unsigned long max_clk_rate;
+
+ /* DMA channel request parameters */
+ void *tx_param;
+ void *rx_param;
+};
+
+static struct dw_dma_slave byt_tx_param = { .dst_id = 0 };
+static struct dw_dma_slave byt_rx_param = { .src_id = 1 };
+
+static struct dw_dma_slave bsw0_tx_param = { .dst_id = 0 };
+static struct dw_dma_slave bsw0_rx_param = { .src_id = 1 };
+static struct dw_dma_slave bsw1_tx_param = { .dst_id = 6 };
+static struct dw_dma_slave bsw1_rx_param = { .src_id = 7 };
+static struct dw_dma_slave bsw2_tx_param = { .dst_id = 8 };
+static struct dw_dma_slave bsw2_rx_param = { .src_id = 9 };
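+
+/*
+ * The dst_id/src_id values above are the DMA request lines on the
+ * shared DesignWare DMA controller serving these LPSS SSP ports;
+ * each port gets its own tx/rx pair.
+ */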
+
+static bool lpss_dma_filter(struct dma_chan *chan, void *param)
+{
+ struct dw_dma_slave *dws = param;
+
+ if (dws->dma_dev != chan->device->dev)
+ return false;
+
+ chan->private = dws;
+ return true;
+}
+
+static struct pxa_spi_info spi_info_configs[] = {
+ [PORT_CE4100] = {
+ .type = PXA25x_SSP,
+ .port_id = -1,
+ .num_chipselect = -1,
+ .max_clk_rate = 3686400,
+ },
+ [PORT_BYT] = {
+ .type = LPSS_SSP,
+ .port_id = 0,
+ .num_chipselect = 1,
+ .max_clk_rate = 50000000,
+ .tx_param = &byt_tx_param,
+ .rx_param = &byt_rx_param,
+ },
+ [PORT_BSW0] = {
+ .type = LPSS_SSP,
+ .port_id = 0,
+ .num_chipselect = 1,
+ .max_clk_rate = 50000000,
+ .tx_param = &bsw0_tx_param,
+ .rx_param = &bsw0_rx_param,
+ },
+ [PORT_BSW1] = {
+ .type = LPSS_SSP,
+ .port_id = 1,
+ .num_chipselect = 1,
+ .max_clk_rate = 50000000,
+ .tx_param = &bsw1_tx_param,
+ .rx_param = &bsw1_rx_param,
+ },
+ [PORT_BSW2] = {
+ .type = LPSS_SSP,
+ .port_id = 2,
+ .num_chipselect = 1,
+ .max_clk_rate = 50000000,
+ .tx_param = &bsw2_tx_param,
+ .rx_param = &bsw2_rx_param,
+ },
+ [PORT_QUARK_X1000] = {
+ .type = QUARK_X1000_SSP,
+ .port_id = -1,
+ .num_chipselect = 1,
+ .max_clk_rate = 50000000,
+ },
+};
+
+static int pxa2xx_spi_pci_probe(struct pci_dev *dev,
+ const struct pci_device_id *ent)
+{
+ struct platform_device_info pi;
+ int ret;
+ struct platform_device *pdev;
+ struct pxa2xx_spi_master spi_pdata;
+ struct ssp_device *ssp;
+ struct pxa_spi_info *c;
+ char buf[40];
+ struct pci_dev *dma_dev;
+
+ ret = pcim_enable_device(dev);
+ if (ret)
+ return ret;
+
+ ret = pcim_iomap_regions(dev, 1 << 0, "PXA2xx SPI");
+ if (ret)
+ return ret;
+
+ c = &spi_info_configs[ent->driver_data];
+
+ memset(&spi_pdata, 0, sizeof(spi_pdata));
+ spi_pdata.num_chipselect = (c->num_chipselect > 0) ?
+ c->num_chipselect : dev->devfn;
+
+ dma_dev = pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
+
+ if (c->tx_param) {
+ struct dw_dma_slave *slave = c->tx_param;
+
+ slave->dma_dev = &dma_dev->dev;
+ slave->src_master = 1;
+ slave->dst_master = 0;
+ }
+
+ if (c->rx_param) {
+ struct dw_dma_slave *slave = c->rx_param;
+
+ slave->dma_dev = &dma_dev->dev;
+ slave->src_master = 1;
+ slave->dst_master = 0;
+ }
+
+ spi_pdata.dma_filter = lpss_dma_filter;
+ spi_pdata.tx_param = c->tx_param;
+ spi_pdata.rx_param = c->rx_param;
+ spi_pdata.enable_dma = c->rx_param && c->tx_param;
+
+ ssp = &spi_pdata.ssp;
+ ssp->phys_base = pci_resource_start(dev, 0);
+ ssp->mmio_base = pcim_iomap_table(dev)[0];
+ if (!ssp->mmio_base) {
+ dev_err(&dev->dev, "failed to ioremap() registers\n");
+ return -EIO;
+ }
+ ssp->irq = dev->irq;
+ ssp->port_id = (c->port_id >= 0) ? c->port_id : dev->devfn;
+ ssp->type = c->type;
+
+ snprintf(buf, sizeof(buf), "pxa2xx-spi.%d", ssp->port_id);
+ ssp->clk = clk_register_fixed_rate(&dev->dev, buf, NULL,
+ CLK_IS_ROOT, c->max_clk_rate);
+ if (IS_ERR(ssp->clk))
+ return PTR_ERR(ssp->clk);
+
+ memset(&pi, 0, sizeof(pi));
+ pi.parent = &dev->dev;
+ pi.name = "pxa2xx-spi";
+ pi.id = ssp->port_id;
+ pi.data = &spi_pdata;
+ pi.size_data = sizeof(spi_pdata);
+
+ pdev = platform_device_register_full(&pi);
+ if (IS_ERR(pdev)) {
+ clk_unregister(ssp->clk);
+ return PTR_ERR(pdev);
+ }
+
+ pci_set_drvdata(dev, pdev);
+
+ return 0;
+}
+
+static void pxa2xx_spi_pci_remove(struct pci_dev *dev)
+{
+ struct platform_device *pdev = pci_get_drvdata(dev);
+ struct pxa2xx_spi_master *spi_pdata;
+
+ spi_pdata = dev_get_platdata(&pdev->dev);
+
+ platform_device_unregister(pdev);
+ clk_unregister(spi_pdata->ssp.clk);
+}
+
+static const struct pci_device_id pxa2xx_spi_pci_devices[] = {
+ { PCI_VDEVICE(INTEL, 0x2e6a), PORT_CE4100 },
+ { PCI_VDEVICE(INTEL, 0x0935), PORT_QUARK_X1000 },
+ { PCI_VDEVICE(INTEL, 0x0f0e), PORT_BYT },
+ { PCI_VDEVICE(INTEL, 0x228e), PORT_BSW0 },
+ { PCI_VDEVICE(INTEL, 0x2290), PORT_BSW1 },
+ { PCI_VDEVICE(INTEL, 0x22ac), PORT_BSW2 },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, pxa2xx_spi_pci_devices);
+
+static struct pci_driver pxa2xx_spi_pci_driver = {
+ .name = "pxa2xx_spi_pci",
+ .id_table = pxa2xx_spi_pci_devices,
+ .probe = pxa2xx_spi_pci_probe,
+ .remove = pxa2xx_spi_pci_remove,
+};
+
+module_pci_driver(pxa2xx_spi_pci_driver);
+
+MODULE_DESCRIPTION("CE4100/LPSS PCI-SPI glue code for PXA's driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Sebastian Andrzej Siewior <bigeasy@linutronix.de>");
diff --git a/drivers/spi/spi-pxa2xx-pxadma.c b/drivers/spi/spi-pxa2xx-pxadma.c
new file mode 100644
index 000000000..2e0796a00
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx-pxadma.c
@@ -0,0 +1,487 @@
+/*
+ * PXA2xx SPI private DMA support.
+ *
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/dma-mapping.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
+
+#include <mach/dma.h>
+#include "spi-pxa2xx.h"
+
+#define DMA_INT_MASK (DCSR_ENDINTR | DCSR_STARTINTR | DCSR_BUSERR)
+#define RESET_DMA_CHANNEL (DCSR_NODESC | DMA_INT_MASK)
+
+bool pxa2xx_spi_dma_is_possible(size_t len)
+{
+ /* Try to map dma buffer and do a dma transfer if successful, but
+ * only if the length is non-zero and less than MAX_DMA_LEN.
+ *
+ * Zero-length non-descriptor DMA is illegal on PXA2xx; force use
+ * of PIO instead. Care is needed above because the transfer may
+ * have been passed with buffers that are already dma mapped.
+ * A zero-length transfer in PIO mode will not try to write/read
+ * to/from the buffers.
+ *
+ * REVISIT large transfers are exactly where we most want to be
+ * using DMA. If this happens much, split those transfers into
+ * multiple DMA segments rather than forcing PIO.
+ */
+ return len > 0 && len <= MAX_DMA_LEN;
+}
+
+int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct device *dev = &msg->spi->dev;
+
+ if (!drv_data->cur_chip->enable_dma)
+ return 0;
+
+ if (msg->is_dma_mapped)
+ return drv_data->rx_dma && drv_data->tx_dma;
+
+ if (!IS_DMA_ALIGNED(drv_data->rx) || !IS_DMA_ALIGNED(drv_data->tx))
+ return 0;
+
+ /* Modify setup if rx buffer is null */
+ if (drv_data->rx == NULL) {
+ *drv_data->null_dma_buf = 0;
+ drv_data->rx = drv_data->null_dma_buf;
+ drv_data->rx_map_len = 4;
+ } else
+ drv_data->rx_map_len = drv_data->len;
+
+
+ /* Modify setup if tx buffer is null */
+ if (drv_data->tx == NULL) {
+ *drv_data->null_dma_buf = 0;
+ drv_data->tx = drv_data->null_dma_buf;
+ drv_data->tx_map_len = 4;
+ } else
+ drv_data->tx_map_len = drv_data->len;
+
+ /* Stream map the tx buffer. Always do DMA_TO_DEVICE first
+ * so we flush the cache *before* invalidating it, in case
+ * the tx and rx buffers overlap.
+ */
+ drv_data->tx_dma = dma_map_single(dev, drv_data->tx,
+ drv_data->tx_map_len, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, drv_data->tx_dma))
+ return 0;
+
+ /* Stream map the rx buffer */
+ drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
+ drv_data->rx_map_len, DMA_FROM_DEVICE);
+ if (dma_mapping_error(dev, drv_data->rx_dma)) {
+ dma_unmap_single(dev, drv_data->tx_dma,
+ drv_data->tx_map_len, DMA_TO_DEVICE);
+ return 0;
+ }
+
+ return 1;
+}
+
+static void pxa2xx_spi_unmap_dma_buffers(struct driver_data *drv_data)
+{
+ struct device *dev;
+
+ if (!drv_data->dma_mapped)
+ return;
+
+ if (!drv_data->cur_msg->is_dma_mapped) {
+ dev = &drv_data->cur_msg->spi->dev;
+ dma_unmap_single(dev, drv_data->rx_dma,
+ drv_data->rx_map_len, DMA_FROM_DEVICE);
+ dma_unmap_single(dev, drv_data->tx_dma,
+ drv_data->tx_map_len, DMA_TO_DEVICE);
+ }
+
+ drv_data->dma_mapped = 0;
+}
+
+static int wait_ssp_rx_stall(struct driver_data *drv_data)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit)
+ cpu_relax();
+
+ return limit;
+}
+
+static int wait_dma_channel_stop(int channel)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ while (!(DCSR(channel) & DCSR_STOPSTATE) && --limit)
+ cpu_relax();
+
+ return limit;
+}
+
+static void pxa2xx_spi_dma_error_stop(struct driver_data *drv_data,
+ const char *msg)
+{
+ /* Stop and reset */
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ pxa2xx_spi_write(drv_data, SSCR1,
+ pxa2xx_spi_read(drv_data, SSCR1)
+ & ~drv_data->dma_cr1);
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+ pxa2xx_spi_flush(drv_data);
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+
+ pxa2xx_spi_unmap_dma_buffers(drv_data);
+
+ dev_err(&drv_data->pdev->dev, "%s\n", msg);
+
+ drv_data->cur_msg->state = ERROR_STATE;
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+static void pxa2xx_spi_dma_transfer_complete(struct driver_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+
+ /* Clear and disable interrupts on SSP and DMA channels */
+ pxa2xx_spi_write(drv_data, SSCR1,
+ pxa2xx_spi_read(drv_data, SSCR1)
+ & ~drv_data->dma_cr1);
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+
+ if (wait_dma_channel_stop(drv_data->rx_channel) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_handler: dma rx channel stop failed\n");
+
+ if (wait_ssp_rx_stall(drv_data) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_transfer: ssp rx stall failed\n");
+
+ pxa2xx_spi_unmap_dma_buffers(drv_data);
+
+ /* update the buffer pointer for the amount completed in dma */
+ drv_data->rx += drv_data->len -
+ (DCMD(drv_data->rx_channel) & DCMD_LENGTH);
+
+ /* Read trailing data from the fifo; it does not matter how many
+ * bytes are in the fifo, just read until the buffer is full or
+ * the fifo is empty, whichever occurs first */
+ drv_data->read(drv_data);
+
+ /* return count of what was actually read */
+ msg->actual_length += drv_data->len -
+ (drv_data->rx_end - drv_data->rx);
+
+ /* Transfer delays and chip select release are
+ * handled in pump_transfers or giveback
+ */
+
+ /* Move to next transfer */
+ msg->state = pxa2xx_spi_next_transfer(drv_data);
+
+ /* Schedule transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+void pxa2xx_spi_dma_handler(int channel, void *data)
+{
+ struct driver_data *drv_data = data;
+ u32 irq_status = DCSR(channel) & DMA_INT_MASK;
+
+ if (irq_status & DCSR_BUSERR) {
+
+ if (channel == drv_data->tx_channel)
+ pxa2xx_spi_dma_error_stop(drv_data,
+ "dma_handler: bad bus address on tx channel");
+ else
+ pxa2xx_spi_dma_error_stop(drv_data,
+ "dma_handler: bad bus address on rx channel");
+ return;
+ }
+
+ /* PXA25x_SSP has no timeout interrupt, wait for trailing bytes */
+ if ((channel == drv_data->tx_channel)
+ && (irq_status & DCSR_ENDINTR)
+ && (drv_data->ssp_type == PXA25x_SSP)) {
+
+ /* Wait for rx to stall */
+ if (wait_ssp_rx_stall(drv_data) == 0)
+ dev_err(&drv_data->pdev->dev,
+ "dma_handler: ssp rx stall failed\n");
+
+ /* finish this transfer, start the next */
+ pxa2xx_spi_dma_transfer_complete(drv_data);
+ }
+}
+
+irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data)
+{
+ u32 irq_status;
+
+ irq_status = pxa2xx_spi_read(drv_data, SSSR) & drv_data->mask_sr;
+ if (irq_status & SSSR_ROR) {
+ pxa2xx_spi_dma_error_stop(drv_data,
+ "dma_transfer: fifo overrun");
+ return IRQ_HANDLED;
+ }
+
+ /* Check for false positive timeout */
+ if ((irq_status & SSSR_TINT)
+ && (DCSR(drv_data->tx_channel) & DCSR_RUN)) {
+ pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
+ return IRQ_HANDLED;
+ }
+
+ if (irq_status & SSSR_TINT || drv_data->rx == drv_data->rx_end) {
+
+ /* Clear and disable timeout interrupt, do the rest in
+ * dma_transfer_complete */
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+
+ /* finish this transfer, start the next */
+ pxa2xx_spi_dma_transfer_complete(drv_data);
+
+ return IRQ_HANDLED;
+ }
+
+ /* Oops, problem detected */
+ return IRQ_NONE;
+}
+
+int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst)
+{
+ u32 dma_width;
+
+ switch (drv_data->n_bytes) {
+ case 1:
+ dma_width = DCMD_WIDTH1;
+ break;
+ case 2:
+ dma_width = DCMD_WIDTH2;
+ break;
+ default:
+ dma_width = DCMD_WIDTH4;
+ break;
+ }
+
+ /* Setup rx DMA Channel */
+ DCSR(drv_data->rx_channel) = RESET_DMA_CHANNEL;
+ DSADR(drv_data->rx_channel) = drv_data->ssdr_physical;
+ DTADR(drv_data->rx_channel) = drv_data->rx_dma;
+ if (drv_data->rx == drv_data->null_dma_buf)
+ /* No target address increment */
+ DCMD(drv_data->rx_channel) = DCMD_FLOWSRC
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+ else
+ DCMD(drv_data->rx_channel) = DCMD_INCTRGADDR
+ | DCMD_FLOWSRC
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+
+ /* Setup tx DMA Channel */
+ DCSR(drv_data->tx_channel) = RESET_DMA_CHANNEL;
+ DSADR(drv_data->tx_channel) = drv_data->tx_dma;
+ DTADR(drv_data->tx_channel) = drv_data->ssdr_physical;
+ if (drv_data->tx == drv_data->null_dma_buf)
+ /* No source address increment */
+ DCMD(drv_data->tx_channel) = DCMD_FLOWTRG
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+ else
+ DCMD(drv_data->tx_channel) = DCMD_INCSRCADDR
+ | DCMD_FLOWTRG
+ | dma_width
+ | dma_burst
+ | drv_data->len;
+
+ /* Enable dma end irqs on SSP to detect end of transfer */
+ if (drv_data->ssp_type == PXA25x_SSP)
+ DCMD(drv_data->tx_channel) |= DCMD_ENDIRQEN;
+
+ return 0;
+}
+
+void pxa2xx_spi_dma_start(struct driver_data *drv_data)
+{
+ DCSR(drv_data->rx_channel) |= DCSR_RUN;
+ DCSR(drv_data->tx_channel) |= DCSR_RUN;
+}
+
+int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
+{
+ struct device *dev = &drv_data->pdev->dev;
+ struct ssp_device *ssp = drv_data->ssp;
+
+ /* Get two DMA channels (rx and tx) */
+ drv_data->rx_channel = pxa_request_dma("pxa2xx_spi_ssp_rx",
+ DMA_PRIO_HIGH,
+ pxa2xx_spi_dma_handler,
+ drv_data);
+ if (drv_data->rx_channel < 0) {
+ dev_err(dev, "problem (%d) requesting rx channel\n",
+ drv_data->rx_channel);
+ return -ENODEV;
+ }
+ drv_data->tx_channel = pxa_request_dma("pxa2xx_spi_ssp_tx",
+ DMA_PRIO_MEDIUM,
+ pxa2xx_spi_dma_handler,
+ drv_data);
+ if (drv_data->tx_channel < 0) {
+ dev_err(dev, "problem (%d) requesting tx channel\n",
+ drv_data->tx_channel);
+ pxa_free_dma(drv_data->rx_channel);
+ return -ENODEV;
+ }
+
+ DRCMR(ssp->drcmr_rx) = DRCMR_MAPVLD | drv_data->rx_channel;
+ DRCMR(ssp->drcmr_tx) = DRCMR_MAPVLD | drv_data->tx_channel;
+
+ return 0;
+}
+
+void pxa2xx_spi_dma_release(struct driver_data *drv_data)
+{
+ struct ssp_device *ssp = drv_data->ssp;
+
+ DRCMR(ssp->drcmr_rx) = 0;
+ DRCMR(ssp->drcmr_tx) = 0;
+
+ if (drv_data->tx_channel != 0)
+ pxa_free_dma(drv_data->tx_channel);
+ if (drv_data->rx_channel != 0)
+ pxa_free_dma(drv_data->rx_channel);
+}
+
+void pxa2xx_spi_dma_resume(struct driver_data *drv_data)
+{
+ if (drv_data->rx_channel != -1)
+ DRCMR(drv_data->ssp->drcmr_rx) =
+ DRCMR_MAPVLD | drv_data->rx_channel;
+ if (drv_data->tx_channel != -1)
+ DRCMR(drv_data->ssp->drcmr_tx) =
+ DRCMR_MAPVLD | drv_data->tx_channel;
+}
+
+int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word, u32 *burst_code,
+ u32 *threshold)
+{
+ struct pxa2xx_spi_chip *chip_info =
+ (struct pxa2xx_spi_chip *)spi->controller_data;
+ int bytes_per_word;
+ int burst_bytes;
+ int thresh_words;
+ int req_burst_size;
+ int retval = 0;
+
+	/* Set the threshold (in fifo entries) to represent the same amount
+	 * of data as the burst size (in bytes). The computation below is
+	 * (burst_size rounded up to the nearest 8 bytes, word or long word)
+	 * divided by (bytes/register). The tx threshold is the inverse of
+	 * the rx threshold, so that there will always be enough data in the
+	 * rx fifo to satisfy a burst, and there will always be enough space
+	 * in the tx fifo to accept a burst (a tx burst will overwrite the
+	 * fifo if there is not enough space); there must always remain
+	 * enough empty space in the rx fifo for any data loaded into the
+	 * tx fifo.
+	 * Whenever burst_size (in bytes) equals bits/word, the fifo
+	 * threshold will be 8, or half the fifo.
+	 * The threshold can only be set to 2, 4 or 8, but not 16, because to
+	 * burst 16 into the tx fifo, the fifo would have to be empty;
+	 * however, the minimum fifo trigger level is 1, and the tx will
+	 * request service when the fifo is at this level, with only 15
+	 * spaces left.
+	 */
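+	/*
+	 * Worked example (illustrative values, not taken from a specific
+	 * board): with bits_per_word = 16, bytes_per_word is 2. A requested
+	 * burst of 16 bytes then gives thresh_words = 16 / 2 = 8, i.e. an
+	 * rx trigger of 8 fifo entries and a tx trigger of 16 - 8 = 8.
+	 */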
+
+ /* find bytes/word */
+ if (bits_per_word <= 8)
+ bytes_per_word = 1;
+ else if (bits_per_word <= 16)
+ bytes_per_word = 2;
+ else
+ bytes_per_word = 4;
+
+ /* use struct pxa2xx_spi_chip->dma_burst_size if available */
+ if (chip_info)
+ req_burst_size = chip_info->dma_burst_size;
+ else {
+ switch (chip->dma_burst_size) {
+ default:
+			/* if the default burst size is not set,
+			 * set it now and fall through to DCMD_BURST8 */
+			chip->dma_burst_size = DCMD_BURST8;
+ case DCMD_BURST8:
+ req_burst_size = 8;
+ break;
+ case DCMD_BURST16:
+ req_burst_size = 16;
+ break;
+ case DCMD_BURST32:
+ req_burst_size = 32;
+ break;
+ }
+ }
+ if (req_burst_size <= 8) {
+ *burst_code = DCMD_BURST8;
+ burst_bytes = 8;
+ } else if (req_burst_size <= 16) {
+ if (bytes_per_word == 1) {
+ /* don't burst more than 1/2 the fifo */
+ *burst_code = DCMD_BURST8;
+ burst_bytes = 8;
+ retval = 1;
+ } else {
+ *burst_code = DCMD_BURST16;
+ burst_bytes = 16;
+ }
+ } else {
+ if (bytes_per_word == 1) {
+ /* don't burst more than 1/2 the fifo */
+ *burst_code = DCMD_BURST8;
+ burst_bytes = 8;
+ retval = 1;
+ } else if (bytes_per_word == 2) {
+ /* don't burst more than 1/2 the fifo */
+ *burst_code = DCMD_BURST16;
+ burst_bytes = 16;
+ retval = 1;
+ } else {
+ *burst_code = DCMD_BURST32;
+ burst_bytes = 32;
+ }
+ }
+
+ thresh_words = burst_bytes / bytes_per_word;
+
+ /* thresh_words will be between 2 and 8 */
+ *threshold = (SSCR1_RxTresh(thresh_words) & SSCR1_RFT)
+ | (SSCR1_TxTresh(16-thresh_words) & SSCR1_TFT);
+
+ return retval;
+}
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
new file mode 100644
index 000000000..e3223ac75
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx.c
@@ -0,0 +1,1600 @@
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ * Copyright (C) 2013, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/ioport.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/platform_device.h>
+#include <linux/spi/pxa2xx_spi.h>
+#include <linux/spi/spi.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/pm_runtime.h>
+#include <linux/acpi.h>
+
+#include "spi-pxa2xx.h"
+
+MODULE_AUTHOR("Stephen Street");
+MODULE_DESCRIPTION("PXA2xx SSP SPI Controller");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:pxa2xx-spi");
+
+#define TIMOUT_DFLT 1000
+
+/*
+ * For testing SSCR1 changes that require an SSP restart; basically
+ * everything except the service and interrupt enables. The PXA270
+ * developer manual says only SSCR1_SCFR, SSCR1_SPH and SSCR1_SPO need to
+ * be in this list, but the PXA255 developer manual says all bits (without
+ * really meaning the service and interrupt enables).
+ */
+#define SSCR1_CHANGE_MASK (SSCR1_TTELP | SSCR1_TTE | SSCR1_SCFR \
+ | SSCR1_ECRA | SSCR1_ECRB | SSCR1_SCLKDIR \
+ | SSCR1_SFRMDIR | SSCR1_RWOT | SSCR1_TRAIL \
+ | SSCR1_IFS | SSCR1_STRF | SSCR1_EFWR \
+ | SSCR1_RFT | SSCR1_TFT | SSCR1_MWDS \
+ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+#define QUARK_X1000_SSCR1_CHANGE_MASK (QUARK_X1000_SSCR1_STRF \
+ | QUARK_X1000_SSCR1_EFWR \
+ | QUARK_X1000_SSCR1_RFT \
+ | QUARK_X1000_SSCR1_TFT \
+ | SSCR1_SPH | SSCR1_SPO | SSCR1_LBM)
+
+#define LPSS_RX_THRESH_DFLT 64
+#define LPSS_TX_LOTHRESH_DFLT 160
+#define LPSS_TX_HITHRESH_DFLT 224
+
+/* Offset from drv_data->lpss_base */
+#define GENERAL_REG 0x08
+#define GENERAL_REG_RXTO_HOLDOFF_DISABLE BIT(24)
+#define SSP_REG 0x0c
+#define SPI_CS_CONTROL 0x18
+#define SPI_CS_CONTROL_SW_MODE BIT(0)
+#define SPI_CS_CONTROL_CS_HIGH BIT(1)
+
+static bool is_lpss_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == LPSS_SSP;
+}
+
+static bool is_quark_x1000_ssp(const struct driver_data *drv_data)
+{
+ return drv_data->ssp_type == QUARK_X1000_SSP;
+}
+
+static u32 pxa2xx_spi_get_ssrc1_change_mask(const struct driver_data *drv_data)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ return QUARK_X1000_SSCR1_CHANGE_MASK;
+ default:
+ return SSCR1_CHANGE_MASK;
+ }
+}
+
+static u32
+pxa2xx_spi_get_rx_default_thre(const struct driver_data *drv_data)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ return RX_THRESH_QUARK_X1000_DFLT;
+ default:
+ return RX_THRESH_DFLT;
+ }
+}
+
+static bool pxa2xx_spi_txfifo_full(const struct driver_data *drv_data)
+{
+ u32 mask;
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ mask = QUARK_X1000_SSSR_TFL_MASK;
+ break;
+ default:
+ mask = SSSR_TFL_MASK;
+ break;
+ }
+
+ return (pxa2xx_spi_read(drv_data, SSSR) & mask) == mask;
+}
+
+static void pxa2xx_spi_clear_rx_thre(const struct driver_data *drv_data,
+ u32 *sccr1_reg)
+{
+ u32 mask;
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ mask = QUARK_X1000_SSCR1_RFT;
+ break;
+ default:
+ mask = SSCR1_RFT;
+ break;
+ }
+ *sccr1_reg &= ~mask;
+}
+
+static void pxa2xx_spi_set_rx_thre(const struct driver_data *drv_data,
+ u32 *sccr1_reg, u32 threshold)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ *sccr1_reg |= QUARK_X1000_SSCR1_RxTresh(threshold);
+ break;
+ default:
+ *sccr1_reg |= SSCR1_RxTresh(threshold);
+ break;
+ }
+}
+
+static u32 pxa2xx_configure_sscr0(const struct driver_data *drv_data,
+ u32 clk_div, u8 bits)
+{
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ return clk_div
+ | QUARK_X1000_SSCR0_Motorola
+ | QUARK_X1000_SSCR0_DataSize(bits > 32 ? 8 : bits)
+ | SSCR0_SSE;
+ default:
+ return clk_div
+ | SSCR0_Motorola
+ | SSCR0_DataSize(bits > 16 ? bits - 16 : bits)
+ | SSCR0_SSE
+ | (bits > 16 ? SSCR0_EDSS : 0);
+ }
+}
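+
+/*
+ * Example of the SSCR0 encoding above (illustrative, not called by the
+ * driver): for a non-Quark port with bits = 24, SSCR0_DataSize(24 - 16)
+ * is programmed together with SSCR0_EDSS, which extends the data size by
+ * 16 bits, yielding a 24-bit frame; for bits = 8, EDSS stays clear.
+ */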
+
+/*
+ * Read and write LPSS SSP private registers. Caller must first check that
+ * is_lpss_ssp() returns true before these can be called.
+ */
+static u32 __lpss_ssp_read_priv(struct driver_data *drv_data, unsigned offset)
+{
+ WARN_ON(!drv_data->lpss_base);
+ return readl(drv_data->lpss_base + offset);
+}
+
+static void __lpss_ssp_write_priv(struct driver_data *drv_data,
+ unsigned offset, u32 value)
+{
+ WARN_ON(!drv_data->lpss_base);
+ writel(value, drv_data->lpss_base + offset);
+}
+
+/*
+ * lpss_ssp_setup - perform LPSS SSP specific setup
+ * @drv_data: pointer to the driver private data
+ *
+ * Perform LPSS SSP specific setup. This function must be called first if
+ * one is going to use LPSS SSP private registers.
+ */
+static void lpss_ssp_setup(struct driver_data *drv_data)
+{
+ unsigned offset = 0x400;
+ u32 value, orig;
+
+ /*
+ * Perform auto-detection of the LPSS SSP private registers. They
+ * can be either at 1k or 2k offset from the base address.
+ */
+ orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+
+ /* Test SPI_CS_CONTROL_SW_MODE bit enabling */
+ value = orig | SPI_CS_CONTROL_SW_MODE;
+ writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ if (value != (orig | SPI_CS_CONTROL_SW_MODE)) {
+ offset = 0x800;
+ goto detection_done;
+ }
+
+ orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+
+ /* Test SPI_CS_CONTROL_SW_MODE bit disabling */
+ value = orig & ~SPI_CS_CONTROL_SW_MODE;
+ writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+ if (value != (orig & ~SPI_CS_CONTROL_SW_MODE)) {
+ offset = 0x800;
+ goto detection_done;
+ }
+
+detection_done:
+ /* Now set the LPSS base */
+ drv_data->lpss_base = drv_data->ioaddr + offset;
+
+ /* Enable software chip select control */
+ value = SPI_CS_CONTROL_SW_MODE | SPI_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
+
+ /* Enable multiblock DMA transfers */
+ if (drv_data->master_info->enable_dma) {
+ __lpss_ssp_write_priv(drv_data, SSP_REG, 1);
+
+ value = __lpss_ssp_read_priv(drv_data, GENERAL_REG);
+ value |= GENERAL_REG_RXTO_HOLDOFF_DISABLE;
+ __lpss_ssp_write_priv(drv_data, GENERAL_REG, value);
+ }
+}
+
+static void lpss_ssp_cs_control(struct driver_data *drv_data, bool enable)
+{
+ u32 value;
+
+ value = __lpss_ssp_read_priv(drv_data, SPI_CS_CONTROL);
+ if (enable)
+ value &= ~SPI_CS_CONTROL_CS_HIGH;
+ else
+ value |= SPI_CS_CONTROL_CS_HIGH;
+ __lpss_ssp_write_priv(drv_data, SPI_CS_CONTROL, value);
+}
+
+static void cs_assert(struct driver_data *drv_data)
+{
+ struct chip_data *chip = drv_data->cur_chip;
+
+ if (drv_data->ssp_type == CE4100_SSP) {
+ pxa2xx_spi_write(drv_data, SSSR, drv_data->cur_chip->frm);
+ return;
+ }
+
+ if (chip->cs_control) {
+ chip->cs_control(PXA2XX_CS_ASSERT);
+ return;
+ }
+
+ if (gpio_is_valid(chip->gpio_cs)) {
+ gpio_set_value(chip->gpio_cs, chip->gpio_cs_inverted);
+ return;
+ }
+
+ if (is_lpss_ssp(drv_data))
+ lpss_ssp_cs_control(drv_data, true);
+}
+
+static void cs_deassert(struct driver_data *drv_data)
+{
+ struct chip_data *chip = drv_data->cur_chip;
+
+ if (drv_data->ssp_type == CE4100_SSP)
+ return;
+
+ if (chip->cs_control) {
+ chip->cs_control(PXA2XX_CS_DEASSERT);
+ return;
+ }
+
+ if (gpio_is_valid(chip->gpio_cs)) {
+ gpio_set_value(chip->gpio_cs, !chip->gpio_cs_inverted);
+ return;
+ }
+
+ if (is_lpss_ssp(drv_data))
+ lpss_ssp_cs_control(drv_data, false);
+}
+
+int pxa2xx_spi_flush(struct driver_data *drv_data)
+{
+ unsigned long limit = loops_per_jiffy << 1;
+
+ do {
+ while (pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ pxa2xx_spi_read(drv_data, SSDR);
+ } while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_BSY) && --limit);
+ write_SSSR_CS(drv_data, SSSR_ROR);
+
+ return limit;
+}
+
+static int null_writer(struct driver_data *drv_data)
+{
+ u8 n_bytes = drv_data->n_bytes;
+
+ if (pxa2xx_spi_txfifo_full(drv_data)
+ || (drv_data->tx == drv_data->tx_end))
+ return 0;
+
+ pxa2xx_spi_write(drv_data, SSDR, 0);
+ drv_data->tx += n_bytes;
+
+ return 1;
+}
+
+static int null_reader(struct driver_data *drv_data)
+{
+ u8 n_bytes = drv_data->n_bytes;
+
+ while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ pxa2xx_spi_read(drv_data, SSDR);
+ drv_data->rx += n_bytes;
+ }
+
+ return drv_data->rx == drv_data->rx_end;
+}
+
+static int u8_writer(struct driver_data *drv_data)
+{
+ if (pxa2xx_spi_txfifo_full(drv_data)
+ || (drv_data->tx == drv_data->tx_end))
+ return 0;
+
+ pxa2xx_spi_write(drv_data, SSDR, *(u8 *)(drv_data->tx));
+ ++drv_data->tx;
+
+ return 1;
+}
+
+static int u8_reader(struct driver_data *drv_data)
+{
+ while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ *(u8 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
+ ++drv_data->rx;
+ }
+
+ return drv_data->rx == drv_data->rx_end;
+}
+
+static int u16_writer(struct driver_data *drv_data)
+{
+ if (pxa2xx_spi_txfifo_full(drv_data)
+ || (drv_data->tx == drv_data->tx_end))
+ return 0;
+
+ pxa2xx_spi_write(drv_data, SSDR, *(u16 *)(drv_data->tx));
+ drv_data->tx += 2;
+
+ return 1;
+}
+
+static int u16_reader(struct driver_data *drv_data)
+{
+ while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ *(u16 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
+ drv_data->rx += 2;
+ }
+
+ return drv_data->rx == drv_data->rx_end;
+}
+
+static int u32_writer(struct driver_data *drv_data)
+{
+ if (pxa2xx_spi_txfifo_full(drv_data)
+ || (drv_data->tx == drv_data->tx_end))
+ return 0;
+
+ pxa2xx_spi_write(drv_data, SSDR, *(u32 *)(drv_data->tx));
+ drv_data->tx += 4;
+
+ return 1;
+}
+
+static int u32_reader(struct driver_data *drv_data)
+{
+ while ((pxa2xx_spi_read(drv_data, SSSR) & SSSR_RNE)
+ && (drv_data->rx < drv_data->rx_end)) {
+ *(u32 *)(drv_data->rx) = pxa2xx_spi_read(drv_data, SSDR);
+ drv_data->rx += 4;
+ }
+
+ return drv_data->rx == drv_data->rx_end;
+}
+
+void *pxa2xx_spi_next_transfer(struct driver_data *drv_data)
+{
+ struct spi_message *msg = drv_data->cur_msg;
+ struct spi_transfer *trans = drv_data->cur_transfer;
+
+ /* Move to next transfer */
+ if (trans->transfer_list.next != &msg->transfers) {
+ drv_data->cur_transfer =
+ list_entry(trans->transfer_list.next,
+ struct spi_transfer,
+ transfer_list);
+ return RUNNING_STATE;
+ } else
+ return DONE_STATE;
+}
+
+/* caller already set message->status; dma and pio irqs are blocked */
+static void giveback(struct driver_data *drv_data)
+{
+ struct spi_transfer* last_transfer;
+ struct spi_message *msg;
+
+ msg = drv_data->cur_msg;
+ drv_data->cur_msg = NULL;
+ drv_data->cur_transfer = NULL;
+
+ last_transfer = list_last_entry(&msg->transfers, struct spi_transfer,
+ transfer_list);
+
+ /* Delay if requested before any change in chip select */
+ if (last_transfer->delay_usecs)
+ udelay(last_transfer->delay_usecs);
+
+ /* Drop chip select UNLESS cs_change is true or we are returning
+ * a message with an error, or next message is for another chip
+ */
+ if (!last_transfer->cs_change)
+ cs_deassert(drv_data);
+ else {
+ struct spi_message *next_msg;
+
+ /* Holding of cs was hinted, but we need to make sure
+ * the next message is for the same chip. Don't waste
+ * time with the following tests unless this was hinted.
+ *
+ * We cannot postpone this until pump_messages, because
+ * after calling msg->complete (below) the driver that
+ * sent the current message could be unloaded, which
+ * could invalidate the cs_control() callback...
+ */
+
+ /* get a pointer to the next message, if any */
+ next_msg = spi_get_next_queued_message(drv_data->master);
+
+ /* see if the next and current messages point
+ * to the same chip
+ */
+ if (next_msg && next_msg->spi != msg->spi)
+ next_msg = NULL;
+ if (!next_msg || msg->state == ERROR_STATE)
+ cs_deassert(drv_data);
+ }
+
+ drv_data->cur_chip = NULL;
+ spi_finalize_current_message(drv_data->master);
+}
+
+static void reset_sccr1(struct driver_data *drv_data)
+{
+ struct chip_data *chip = drv_data->cur_chip;
+ u32 sccr1_reg;
+
+ sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1) & ~drv_data->int_cr1;
+ sccr1_reg &= ~SSCR1_RFT;
+ sccr1_reg |= chip->threshold;
+ pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
+}
+
+static void int_error_stop(struct driver_data *drv_data, const char* msg)
+{
+ /* Stop and reset SSP */
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ reset_sccr1(drv_data);
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+ pxa2xx_spi_flush(drv_data);
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+
+ dev_err(&drv_data->pdev->dev, "%s\n", msg);
+
+ drv_data->cur_msg->state = ERROR_STATE;
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+static void int_transfer_complete(struct driver_data *drv_data)
+{
+ /* Stop SSP */
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ reset_sccr1(drv_data);
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+
+	/* Update the total bytes transferred; the count reflects the bytes
+	 * actually read back */
+ drv_data->cur_msg->actual_length += drv_data->len -
+ (drv_data->rx_end - drv_data->rx);
+
+ /* Transfer delays and chip select release are
+ * handled in pump_transfers or giveback
+ */
+
+ /* Move to next transfer */
+ drv_data->cur_msg->state = pxa2xx_spi_next_transfer(drv_data);
+
+ /* Schedule transfer tasklet */
+ tasklet_schedule(&drv_data->pump_transfers);
+}
+
+static irqreturn_t interrupt_transfer(struct driver_data *drv_data)
+{
+ u32 irq_mask = (pxa2xx_spi_read(drv_data, SSCR1) & SSCR1_TIE) ?
+ drv_data->mask_sr : drv_data->mask_sr & ~SSSR_TFS;
+
+ u32 irq_status = pxa2xx_spi_read(drv_data, SSSR) & irq_mask;
+
+ if (irq_status & SSSR_ROR) {
+ int_error_stop(drv_data, "interrupt_transfer: fifo overrun");
+ return IRQ_HANDLED;
+ }
+
+ if (irq_status & SSSR_TINT) {
+ pxa2xx_spi_write(drv_data, SSSR, SSSR_TINT);
+ if (drv_data->read(drv_data)) {
+ int_transfer_complete(drv_data);
+ return IRQ_HANDLED;
+ }
+ }
+
+ /* Drain rx fifo, Fill tx fifo and prevent overruns */
+ do {
+ if (drv_data->read(drv_data)) {
+ int_transfer_complete(drv_data);
+ return IRQ_HANDLED;
+ }
+ } while (drv_data->write(drv_data));
+
+ if (drv_data->read(drv_data)) {
+ int_transfer_complete(drv_data);
+ return IRQ_HANDLED;
+ }
+
+ if (drv_data->tx == drv_data->tx_end) {
+ u32 bytes_left;
+ u32 sccr1_reg;
+
+ sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
+ sccr1_reg &= ~SSCR1_TIE;
+
+ /*
+		 * The PXA25x SSP has no timeout; set up the rx threshold for
+		 * the remaining RX bytes.
+ */
+ if (pxa25x_ssp_comp(drv_data)) {
+ u32 rx_thre;
+
+ pxa2xx_spi_clear_rx_thre(drv_data, &sccr1_reg);
+
+ bytes_left = drv_data->rx_end - drv_data->rx;
+			switch (drv_data->n_bytes) {
+			case 4:
+				bytes_left >>= 1;
+				/* fall through */
+			case 2:
+				bytes_left >>= 1;
+			}
+
+ rx_thre = pxa2xx_spi_get_rx_default_thre(drv_data);
+ if (rx_thre > bytes_left)
+ rx_thre = bytes_left;
+
+ pxa2xx_spi_set_rx_thre(drv_data, &sccr1_reg, rx_thre);
+ }
+ pxa2xx_spi_write(drv_data, SSCR1, sccr1_reg);
+ }
+
+ /* We did something */
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ssp_int(int irq, void *dev_id)
+{
+ struct driver_data *drv_data = dev_id;
+ u32 sccr1_reg;
+ u32 mask = drv_data->mask_sr;
+ u32 status;
+
+ /*
+ * The IRQ might be shared with other peripherals so we must first
+	 * check whether we are RPM suspended or not. If we are, we assume
+	 * that the IRQ was not for us (we shouldn't be RPM suspended while
+	 * the interrupt is enabled).
+ */
+ if (pm_runtime_suspended(&drv_data->pdev->dev))
+ return IRQ_NONE;
+
+ /*
+ * If the device is not yet in RPM suspended state and we get an
+ * interrupt that is meant for another device, check if status bits
+ * are all set to one. That means that the device is already
+ * powered off.
+ */
+ status = pxa2xx_spi_read(drv_data, SSSR);
+ if (status == ~0)
+ return IRQ_NONE;
+
+ sccr1_reg = pxa2xx_spi_read(drv_data, SSCR1);
+
+ /* Ignore possible writes if we don't need to write */
+ if (!(sccr1_reg & SSCR1_TIE))
+ mask &= ~SSSR_TFS;
+
+ if (!(status & mask))
+ return IRQ_NONE;
+
+ if (!drv_data->cur_msg) {
+
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0)
+ & ~SSCR0_SSE);
+ pxa2xx_spi_write(drv_data, SSCR1,
+ pxa2xx_spi_read(drv_data, SSCR1)
+ & ~drv_data->int_cr1);
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+
+ dev_err(&drv_data->pdev->dev,
+ "bad message state in interrupt handler\n");
+
+ /* Never fail */
+ return IRQ_HANDLED;
+ }
+
+ return drv_data->transfer_handler(drv_data);
+}
+
+/*
+ * The Quark SPI has an additional 24 bit register (DDS_CLK_RATE) to multiply
+ * input frequency by fractions of 2^24. It also has a divider by 5.
+ *
+ * There are formulas to get baud rate value for given input frequency and
+ * divider parameters, such as DDS_CLK_RATE and SCR:
+ *
+ * Fsys = 200MHz
+ *
+ * Fssp = Fsys * DDS_CLK_RATE / 2^24 (1)
+ * Baud rate = Fsclk = Fssp / (2 * (SCR + 1)) (2)
+ *
+ * DDS_CLK_RATE is either 2^n or 2^n / 5.
+ * SCR is in range 0 .. 255
+ *
+ * Divisor = 5^i * 2^j * 2 * k
+ * i = [0, 1] i = 1 iff j = 0 or j > 3
+ * j = [0, 23] j = 0 iff i = 1
+ * k = [1, 256]
+ * Special case: j = 0, i = 1: Divisor = 2 / 5
+ *
+ * According to the specification, the recommended values for DDS_CLK_RATE
+ * are:
+ * Case 1: 2^n, n = [0, 23]
+ * Case 2: 2^24 * 2 / 5 (0x666666)
+ * Case 3: less than or equal to 2^24 / 5 / 16 (0x33333)
+ *
+ * In all cases the lowest possible value is better.
+ *
+ * The function calculates parameters for all cases and chooses the one closest
+ * to the requested baud rate.
+ */
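+/*
+ * Worked example (illustrative): with DDS_CLK_RATE = 2^21 (0x200000),
+ * (1) gives Fssp = 200 MHz * 2^21 / 2^24 = 25 MHz; choosing SCR = 11 in
+ * (2) then gives a baud rate of 25 MHz / (2 * 12) ~= 1.042 MHz.
+ */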
+static unsigned int quark_x1000_get_clk_div(int rate, u32 *dds)
+{
+ unsigned long xtal = 200000000;
+	unsigned long fref = xtal / 2;		/* mandatory division by 2,
+						 * see (2); also the base
+						 * frequency for case 3 */
+ unsigned long fref1 = fref / 2; /* case 1 */
+ unsigned long fref2 = fref * 2 / 5; /* case 2 */
+ unsigned long scale;
+ unsigned long q, q1, q2;
+ long r, r1, r2;
+ u32 mul;
+
+ /* Case 1 */
+
+ /* Set initial value for DDS_CLK_RATE */
+ mul = (1 << 24) >> 1;
+
+ /* Calculate initial quot */
+ q1 = DIV_ROUND_CLOSEST(fref1, rate);
+
+ /* Scale q1 if it's too big */
+ if (q1 > 256) {
+ /* Scale q1 to range [1, 512] */
+ scale = fls_long(q1 - 1);
+ if (scale > 9) {
+ q1 >>= scale - 9;
+ mul >>= scale - 9;
+ }
+
+ /* Round the result if we have a remainder */
+ q1 += q1 & 1;
+ }
+
+ /* Decrease DDS_CLK_RATE as much as we can without loss in precision */
+ scale = __ffs(q1);
+ q1 >>= scale;
+ mul >>= scale;
+
+ /* Get the remainder */
+ r1 = abs(fref1 / (1 << (24 - fls_long(mul))) / q1 - rate);
+
+ /* Case 2 */
+
+ q2 = DIV_ROUND_CLOSEST(fref2, rate);
+ r2 = abs(fref2 / q2 - rate);
+
+ /*
+	 * Choose the better of the two: the smaller the remainder, the
+	 * better. We can't use case 2 if q2 is greater than 256 since the
+	 * SCR register can hold only values 0 .. 255.
+ */
+ if (r2 >= r1 || q2 > 256) {
+ /* case 1 is better */
+ r = r1;
+ q = q1;
+ } else {
+ /* case 2 is better */
+ r = r2;
+ q = q2;
+ mul = (1 << 24) * 2 / 5;
+ }
+
+	/* Check case 3 only if the divisor is big enough */
+ if (fref / rate >= 80) {
+ u64 fssp;
+ u32 m;
+
+ /* Calculate initial quot */
+ q1 = DIV_ROUND_CLOSEST(fref, rate);
+ m = (1 << 24) / q1;
+
+ /* Get the remainder */
+ fssp = (u64)fref * m;
+ do_div(fssp, 1 << 24);
+ r1 = abs(fssp - rate);
+
+ /* Choose this one if it suits better */
+ if (r1 < r) {
+ /* case 3 is better */
+ q = 1;
+ mul = m;
+ }
+ }
+
+ *dds = mul;
+ return q - 1;
+}
+
+static unsigned int ssp_get_clk_div(struct driver_data *drv_data, int rate)
+{
+ unsigned long ssp_clk = drv_data->max_clk_rate;
+ const struct ssp_device *ssp = drv_data->ssp;
+
+ rate = min_t(int, ssp_clk, rate);
+
+ if (ssp->type == PXA25x_SSP || ssp->type == CE4100_SSP)
+ return (ssp_clk / (2 * rate) - 1) & 0xff;
+ else
+ return (ssp_clk / rate - 1) & 0xfff;
+}
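+
+/*
+ * Example (illustrative): on a non-PXA25x port with ssp_clk = 48 MHz and
+ * a requested rate of 1 MHz, the function returns 48 - 1 = 47, i.e. an
+ * SCR value giving 48 MHz / (47 + 1) = 1 MHz; on PXA25x/CE4100 the extra
+ * factor of two makes it 48 MHz / (2 * (23 + 1)) = 1 MHz with SCR = 23.
+ */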
+
+static unsigned int pxa2xx_ssp_get_clk_div(struct driver_data *drv_data,
+ struct chip_data *chip, int rate)
+{
+ unsigned int clk_div;
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ clk_div = quark_x1000_get_clk_div(rate, &chip->dds_rate);
+ break;
+ default:
+ clk_div = ssp_get_clk_div(drv_data, rate);
+ break;
+ }
+ return clk_div << 8;
+}
+
+static void pump_transfers(unsigned long data)
+{
+ struct driver_data *drv_data = (struct driver_data *)data;
+ struct spi_message *message = NULL;
+ struct spi_transfer *transfer = NULL;
+ struct spi_transfer *previous = NULL;
+ struct chip_data *chip = NULL;
+ u32 clk_div = 0;
+ u8 bits = 0;
+ u32 speed = 0;
+ u32 cr0;
+ u32 cr1;
+ u32 dma_thresh = drv_data->cur_chip->dma_threshold;
+ u32 dma_burst = drv_data->cur_chip->dma_burst_size;
+ u32 change_mask = pxa2xx_spi_get_ssrc1_change_mask(drv_data);
+
+ /* Get current state information */
+ message = drv_data->cur_msg;
+ transfer = drv_data->cur_transfer;
+ chip = drv_data->cur_chip;
+
+ /* Handle for abort */
+ if (message->state == ERROR_STATE) {
+ message->status = -EIO;
+ giveback(drv_data);
+ return;
+ }
+
+ /* Handle end of message */
+ if (message->state == DONE_STATE) {
+ message->status = 0;
+ giveback(drv_data);
+ return;
+ }
+
+ /* Delay if requested at end of transfer before CS change */
+ if (message->state == RUNNING_STATE) {
+ previous = list_entry(transfer->transfer_list.prev,
+ struct spi_transfer,
+ transfer_list);
+ if (previous->delay_usecs)
+ udelay(previous->delay_usecs);
+
+ /* Drop chip select only if cs_change is requested */
+ if (previous->cs_change)
+ cs_deassert(drv_data);
+ }
+
+ /* Check if we can DMA this transfer */
+ if (!pxa2xx_spi_dma_is_possible(transfer->len) && chip->enable_dma) {
+
+ /* reject already-mapped transfers; PIO won't always work */
+ if (message->is_dma_mapped
+ || transfer->rx_dma || transfer->tx_dma) {
+ dev_err(&drv_data->pdev->dev,
+ "pump_transfers: mapped transfer length of "
+ "%u is greater than %d\n",
+ transfer->len, MAX_DMA_LEN);
+ message->status = -EINVAL;
+ giveback(drv_data);
+ return;
+ }
+
+ /* warn ... we force this to PIO mode */
+ dev_warn_ratelimited(&message->spi->dev,
+ "pump_transfers: DMA disabled for transfer length %ld "
+ "greater than %d\n",
+ (long)drv_data->len, MAX_DMA_LEN);
+ }
+
+ /* Setup the transfer state based on the type of transfer */
+ if (pxa2xx_spi_flush(drv_data) == 0) {
+ dev_err(&drv_data->pdev->dev, "pump_transfers: flush failed\n");
+ message->status = -EIO;
+ giveback(drv_data);
+ return;
+ }
+ drv_data->n_bytes = chip->n_bytes;
+ drv_data->tx = (void *)transfer->tx_buf;
+ drv_data->tx_end = drv_data->tx + transfer->len;
+ drv_data->rx = transfer->rx_buf;
+ drv_data->rx_end = drv_data->rx + transfer->len;
+ drv_data->rx_dma = transfer->rx_dma;
+ drv_data->tx_dma = transfer->tx_dma;
+ drv_data->len = transfer->len;
+ drv_data->write = drv_data->tx ? chip->write : null_writer;
+ drv_data->read = drv_data->rx ? chip->read : null_reader;
+
+	/* Change speed and bits per word on a per-transfer basis */
+ cr0 = chip->cr0;
+ if (transfer->speed_hz || transfer->bits_per_word) {
+
+ bits = chip->bits_per_word;
+ speed = chip->speed_hz;
+
+ if (transfer->speed_hz)
+ speed = transfer->speed_hz;
+
+ if (transfer->bits_per_word)
+ bits = transfer->bits_per_word;
+
+ clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, speed);
+
+ if (bits <= 8) {
+ drv_data->n_bytes = 1;
+ drv_data->read = drv_data->read != null_reader ?
+ u8_reader : null_reader;
+ drv_data->write = drv_data->write != null_writer ?
+ u8_writer : null_writer;
+ } else if (bits <= 16) {
+ drv_data->n_bytes = 2;
+ drv_data->read = drv_data->read != null_reader ?
+ u16_reader : null_reader;
+ drv_data->write = drv_data->write != null_writer ?
+ u16_writer : null_writer;
+ } else if (bits <= 32) {
+ drv_data->n_bytes = 4;
+ drv_data->read = drv_data->read != null_reader ?
+ u32_reader : null_reader;
+ drv_data->write = drv_data->write != null_writer ?
+ u32_writer : null_writer;
+ }
+		/* if bits/word is changed in DMA mode, the thresholds and
+		 * burst must also be re-checked */
+ if (chip->enable_dma) {
+ if (pxa2xx_spi_set_dma_burst_and_threshold(chip,
+ message->spi,
+ bits, &dma_burst,
+ &dma_thresh))
+ dev_warn_ratelimited(&message->spi->dev,
+ "pump_transfers: DMA burst size reduced to match bits_per_word\n");
+ }
+
+ cr0 = pxa2xx_configure_sscr0(drv_data, clk_div, bits);
+ }
+
+ message->state = RUNNING_STATE;
+
+ drv_data->dma_mapped = 0;
+ if (pxa2xx_spi_dma_is_possible(drv_data->len))
+ drv_data->dma_mapped = pxa2xx_spi_map_dma_buffers(drv_data);
+ if (drv_data->dma_mapped) {
+
+ /* Ensure we have the correct interrupt handler */
+ drv_data->transfer_handler = pxa2xx_spi_dma_transfer;
+
+ pxa2xx_spi_dma_prepare(drv_data, dma_burst);
+
+ /* Clear status and start DMA engine */
+ cr1 = chip->cr1 | dma_thresh | drv_data->dma_cr1;
+ pxa2xx_spi_write(drv_data, SSSR, drv_data->clear_sr);
+
+ pxa2xx_spi_dma_start(drv_data);
+ } else {
+ /* Ensure we have the correct interrupt handler */
+ drv_data->transfer_handler = interrupt_transfer;
+
+ /* Clear status */
+ cr1 = chip->cr1 | chip->threshold | drv_data->int_cr1;
+ write_SSSR_CS(drv_data, drv_data->clear_sr);
+ }
+
+ if (is_lpss_ssp(drv_data)) {
+ if ((pxa2xx_spi_read(drv_data, SSIRF) & 0xff)
+ != chip->lpss_rx_threshold)
+ pxa2xx_spi_write(drv_data, SSIRF,
+ chip->lpss_rx_threshold);
+ if ((pxa2xx_spi_read(drv_data, SSITF) & 0xffff)
+ != chip->lpss_tx_threshold)
+ pxa2xx_spi_write(drv_data, SSITF,
+ chip->lpss_tx_threshold);
+ }
+
+ if (is_quark_x1000_ssp(drv_data) &&
+ (pxa2xx_spi_read(drv_data, DDS_RATE) != chip->dds_rate))
+ pxa2xx_spi_write(drv_data, DDS_RATE, chip->dds_rate);
+
+ /* see if we need to reload the config registers */
+ if ((pxa2xx_spi_read(drv_data, SSCR0) != cr0)
+ || (pxa2xx_spi_read(drv_data, SSCR1) & change_mask)
+ != (cr1 & change_mask)) {
+ /* stop the SSP, and update the other bits */
+ pxa2xx_spi_write(drv_data, SSCR0, cr0 & ~SSCR0_SSE);
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
+ /* first set CR1 without interrupt and service enables */
+ pxa2xx_spi_write(drv_data, SSCR1, cr1 & change_mask);
+ /* restart the SSP */
+ pxa2xx_spi_write(drv_data, SSCR0, cr0);
+
+ } else {
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, chip->timeout);
+ }
+
+ cs_assert(drv_data);
+
+ /* after chip select, release the data by enabling service
+ * requests and interrupts, without changing any mode bits */
+ pxa2xx_spi_write(drv_data, SSCR1, cr1);
+}
+
+static int pxa2xx_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct driver_data *drv_data = spi_master_get_devdata(master);
+
+ drv_data->cur_msg = msg;
+	/* Initial message state */
+ drv_data->cur_msg->state = START_STATE;
+ drv_data->cur_transfer = list_entry(drv_data->cur_msg->transfers.next,
+ struct spi_transfer,
+ transfer_list);
+
+	/* prepare to set up the SSP, in pump_transfers, using the per-chip
+	 * configuration */
+ drv_data->cur_chip = spi_get_ctldata(drv_data->cur_msg->spi);
+
+ /* Mark as busy and launch transfers */
+ tasklet_schedule(&drv_data->pump_transfers);
+ return 0;
+}
+
+static int pxa2xx_spi_unprepare_transfer(struct spi_master *master)
+{
+ struct driver_data *drv_data = spi_master_get_devdata(master);
+
+ /* Disable the SSP now */
+ pxa2xx_spi_write(drv_data, SSCR0,
+ pxa2xx_spi_read(drv_data, SSCR0) & ~SSCR0_SSE);
+
+ return 0;
+}
+
+static int setup_cs(struct spi_device *spi, struct chip_data *chip,
+ struct pxa2xx_spi_chip *chip_info)
+{
+ int err = 0;
+
+ if (chip == NULL || chip_info == NULL)
+ return 0;
+
+	/* NOTE: setup() can be called multiple times, possibly with a
+	 * different chip_info; release any previously requested GPIO
+	 */
+ if (gpio_is_valid(chip->gpio_cs))
+ gpio_free(chip->gpio_cs);
+
+ /* If (*cs_control) is provided, ignore GPIO chip select */
+ if (chip_info->cs_control) {
+ chip->cs_control = chip_info->cs_control;
+ return 0;
+ }
+
+ if (gpio_is_valid(chip_info->gpio_cs)) {
+ err = gpio_request(chip_info->gpio_cs, "SPI_CS");
+ if (err) {
+ dev_err(&spi->dev, "failed to request chip select GPIO%d\n",
+ chip_info->gpio_cs);
+ return err;
+ }
+
+ chip->gpio_cs = chip_info->gpio_cs;
+ chip->gpio_cs_inverted = spi->mode & SPI_CS_HIGH;
+
+ err = gpio_direction_output(chip->gpio_cs,
+ !chip->gpio_cs_inverted);
+ }
+
+ return err;
+}
+
+static int setup(struct spi_device *spi)
+{
+ struct pxa2xx_spi_chip *chip_info = NULL;
+ struct chip_data *chip;
+ struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+ unsigned int clk_div;
+ uint tx_thres, tx_hi_thres, rx_thres;
+
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ tx_thres = TX_THRESH_QUARK_X1000_DFLT;
+ tx_hi_thres = 0;
+ rx_thres = RX_THRESH_QUARK_X1000_DFLT;
+ break;
+ case LPSS_SSP:
+ tx_thres = LPSS_TX_LOTHRESH_DFLT;
+ tx_hi_thres = LPSS_TX_HITHRESH_DFLT;
+ rx_thres = LPSS_RX_THRESH_DFLT;
+ break;
+ default:
+ tx_thres = TX_THRESH_DFLT;
+ tx_hi_thres = 0;
+ rx_thres = RX_THRESH_DFLT;
+ break;
+ }
+
+ /* Only alloc on first setup */
+ chip = spi_get_ctldata(spi);
+ if (!chip) {
+ chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL);
+ if (!chip)
+ return -ENOMEM;
+
+ if (drv_data->ssp_type == CE4100_SSP) {
+ if (spi->chip_select > 4) {
+ dev_err(&spi->dev,
+ "failed setup: cs number must not be > 4.\n");
+ kfree(chip);
+ return -EINVAL;
+ }
+
+ chip->frm = spi->chip_select;
+ } else
+ chip->gpio_cs = -1;
+ chip->enable_dma = 0;
+ chip->timeout = TIMOUT_DFLT;
+ }
+
+	/* protocol drivers may change the chip settings, so if chip_info
+	 * exists, use it */
+ chip_info = spi->controller_data;
+
+ /* chip_info isn't always needed */
+ chip->cr1 = 0;
+ if (chip_info) {
+ if (chip_info->timeout)
+ chip->timeout = chip_info->timeout;
+ if (chip_info->tx_threshold)
+ tx_thres = chip_info->tx_threshold;
+ if (chip_info->tx_hi_threshold)
+ tx_hi_thres = chip_info->tx_hi_threshold;
+ if (chip_info->rx_threshold)
+ rx_thres = chip_info->rx_threshold;
+ chip->enable_dma = drv_data->master_info->enable_dma;
+ chip->dma_threshold = 0;
+ if (chip_info->enable_loopback)
+ chip->cr1 = SSCR1_LBM;
+ } else if (ACPI_HANDLE(&spi->dev)) {
+ /*
+ * Slave devices enumerated from ACPI namespace don't
+ * usually have chip_info but we still might want to use
+ * DMA with them.
+ */
+ chip->enable_dma = drv_data->master_info->enable_dma;
+ }
+
+ chip->lpss_rx_threshold = SSIRF_RxThresh(rx_thres);
+ chip->lpss_tx_threshold = SSITF_TxLoThresh(tx_thres)
+ | SSITF_TxHiThresh(tx_hi_thres);
+
+ /* set dma burst and threshold outside of chip_info path so that if
+ * chip_info goes away after setting chip->enable_dma, the
+ * burst and threshold can still respond to changes in bits_per_word */
+ if (chip->enable_dma) {
+ /* set up legal burst and threshold for dma */
+ if (pxa2xx_spi_set_dma_burst_and_threshold(chip, spi,
+ spi->bits_per_word,
+ &chip->dma_burst_size,
+ &chip->dma_threshold)) {
+ dev_warn(&spi->dev,
+ "in setup: DMA burst size reduced to match bits_per_word\n");
+ }
+ }
+
+ clk_div = pxa2xx_ssp_get_clk_div(drv_data, chip, spi->max_speed_hz);
+ chip->speed_hz = spi->max_speed_hz;
+
+ chip->cr0 = pxa2xx_configure_sscr0(drv_data, clk_div,
+ spi->bits_per_word);
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ chip->threshold = (QUARK_X1000_SSCR1_RxTresh(rx_thres)
+ & QUARK_X1000_SSCR1_RFT)
+ | (QUARK_X1000_SSCR1_TxTresh(tx_thres)
+ & QUARK_X1000_SSCR1_TFT);
+ break;
+ default:
+ chip->threshold = (SSCR1_RxTresh(rx_thres) & SSCR1_RFT) |
+ (SSCR1_TxTresh(tx_thres) & SSCR1_TFT);
+ break;
+ }
+
+ chip->cr1 &= ~(SSCR1_SPO | SSCR1_SPH);
+ chip->cr1 |= (((spi->mode & SPI_CPHA) != 0) ? SSCR1_SPH : 0)
+ | (((spi->mode & SPI_CPOL) != 0) ? SSCR1_SPO : 0);
+
+ if (spi->mode & SPI_LOOP)
+ chip->cr1 |= SSCR1_LBM;
+
+ /* NOTE: PXA25x_SSP _could_ use external clocking ... */
+ if (!pxa25x_ssp_comp(drv_data))
+ dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
+ drv_data->max_clk_rate
+ / (1 + ((chip->cr0 & SSCR0_SCR(0xfff)) >> 8)),
+ chip->enable_dma ? "DMA" : "PIO");
+ else
+ dev_dbg(&spi->dev, "%ld Hz actual, %s\n",
+ drv_data->max_clk_rate / 2
+ / (1 + ((chip->cr0 & SSCR0_SCR(0x0ff)) >> 8)),
+ chip->enable_dma ? "DMA" : "PIO");
+
+ if (spi->bits_per_word <= 8) {
+ chip->n_bytes = 1;
+ chip->read = u8_reader;
+ chip->write = u8_writer;
+ } else if (spi->bits_per_word <= 16) {
+ chip->n_bytes = 2;
+ chip->read = u16_reader;
+ chip->write = u16_writer;
+ } else if (spi->bits_per_word <= 32) {
+ if (!is_quark_x1000_ssp(drv_data))
+ chip->cr0 |= SSCR0_EDSS;
+ chip->n_bytes = 4;
+ chip->read = u32_reader;
+ chip->write = u32_writer;
+ }
+ chip->bits_per_word = spi->bits_per_word;
+
+ spi_set_ctldata(spi, chip);
+
+ if (drv_data->ssp_type == CE4100_SSP)
+ return 0;
+
+ return setup_cs(spi, chip, chip_info);
+}
+
+static void cleanup(struct spi_device *spi)
+{
+ struct chip_data *chip = spi_get_ctldata(spi);
+ struct driver_data *drv_data = spi_master_get_devdata(spi->master);
+
+ if (!chip)
+ return;
+
+ if (drv_data->ssp_type != CE4100_SSP && gpio_is_valid(chip->gpio_cs))
+ gpio_free(chip->gpio_cs);
+
+ kfree(chip);
+}
+
+#ifdef CONFIG_ACPI
+static struct pxa2xx_spi_master *
+pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
+{
+ struct pxa2xx_spi_master *pdata;
+ struct acpi_device *adev;
+ struct ssp_device *ssp;
+ struct resource *res;
+ int devid;
+
+ if (!ACPI_HANDLE(&pdev->dev) ||
+ acpi_bus_get_device(ACPI_HANDLE(&pdev->dev), &adev))
+ return NULL;
+
+ pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL);
+ if (!pdata)
+ return NULL;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res)
+ return NULL;
+
+ ssp = &pdata->ssp;
+
+ ssp->phys_base = res->start;
+ ssp->mmio_base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(ssp->mmio_base))
+ return NULL;
+
+ ssp->clk = devm_clk_get(&pdev->dev, NULL);
+ ssp->irq = platform_get_irq(pdev, 0);
+ ssp->type = LPSS_SSP;
+ ssp->pdev = pdev;
+
+ ssp->port_id = -1;
+ if (adev->pnp.unique_id && !kstrtoint(adev->pnp.unique_id, 0, &devid))
+ ssp->port_id = devid;
+
+ pdata->num_chipselect = 1;
+ pdata->enable_dma = true;
+
+ return pdata;
+}
+
+static struct acpi_device_id pxa2xx_spi_acpi_match[] = {
+ { "INT33C0", 0 },
+ { "INT33C1", 0 },
+ { "INT3430", 0 },
+ { "INT3431", 0 },
+ { "80860F0E", 0 },
+ { "8086228E", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, pxa2xx_spi_acpi_match);
+#else
+static inline struct pxa2xx_spi_master *
+pxa2xx_spi_acpi_get_pdata(struct platform_device *pdev)
+{
+ return NULL;
+}
+#endif
+
+static int pxa2xx_spi_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct pxa2xx_spi_master *platform_info;
+ struct spi_master *master;
+ struct driver_data *drv_data;
+ struct ssp_device *ssp;
+ int status;
+ u32 tmp;
+
+ platform_info = dev_get_platdata(dev);
+ if (!platform_info) {
+ platform_info = pxa2xx_spi_acpi_get_pdata(pdev);
+ if (!platform_info) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+ }
+
+ ssp = pxa_ssp_request(pdev->id, pdev->name);
+ if (!ssp)
+ ssp = &platform_info->ssp;
+
+ if (!ssp->mmio_base) {
+ dev_err(&pdev->dev, "failed to get ssp\n");
+ return -ENODEV;
+ }
+
+ /* Allocate master with space for drv_data and null dma buffer */
+ master = spi_alloc_master(dev, sizeof(struct driver_data) + 16);
+ if (!master) {
+ dev_err(&pdev->dev, "cannot alloc spi_master\n");
+ pxa_ssp_free(ssp);
+ return -ENOMEM;
+ }
+ drv_data = spi_master_get_devdata(master);
+ drv_data->master = master;
+ drv_data->master_info = platform_info;
+ drv_data->pdev = pdev;
+ drv_data->ssp = ssp;
+
+ master->dev.parent = &pdev->dev;
+ master->dev.of_node = pdev->dev.of_node;
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+
+ master->bus_num = ssp->port_id;
+ master->num_chipselect = platform_info->num_chipselect;
+ master->dma_alignment = DMA_ALIGNMENT;
+ master->cleanup = cleanup;
+ master->setup = setup;
+ master->transfer_one_message = pxa2xx_spi_transfer_one_message;
+ master->unprepare_transfer_hardware = pxa2xx_spi_unprepare_transfer;
+ master->auto_runtime_pm = true;
+
+ drv_data->ssp_type = ssp->type;
+ drv_data->null_dma_buf = (u32 *)PTR_ALIGN(&drv_data[1], DMA_ALIGNMENT);
+
+ drv_data->ioaddr = ssp->mmio_base;
+ drv_data->ssdr_physical = ssp->phys_base + SSDR;
+ if (pxa25x_ssp_comp(drv_data)) {
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ break;
+ default:
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 16);
+ break;
+ }
+
+ drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE;
+ drv_data->dma_cr1 = 0;
+ drv_data->clear_sr = SSSR_ROR;
+ drv_data->mask_sr = SSSR_RFS | SSSR_TFS | SSSR_ROR;
+ } else {
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ drv_data->int_cr1 = SSCR1_TIE | SSCR1_RIE | SSCR1_TINTE;
+ drv_data->dma_cr1 = DEFAULT_DMA_CR1;
+ drv_data->clear_sr = SSSR_ROR | SSSR_TINT;
+ drv_data->mask_sr = SSSR_TINT | SSSR_RFS | SSSR_TFS | SSSR_ROR;
+ }
+
+ status = request_irq(ssp->irq, ssp_int, IRQF_SHARED, dev_name(dev),
+ drv_data);
+ if (status < 0) {
+ dev_err(&pdev->dev, "cannot get IRQ %d\n", ssp->irq);
+ goto out_error_master_alloc;
+ }
+
+ /* Setup DMA if requested */
+ drv_data->tx_channel = -1;
+ drv_data->rx_channel = -1;
+ if (platform_info->enable_dma) {
+ status = pxa2xx_spi_dma_setup(drv_data);
+ if (status) {
+ dev_dbg(dev, "no DMA channels available, using PIO\n");
+ platform_info->enable_dma = false;
+ }
+ }
+
+ /* Enable SOC clock */
+ clk_prepare_enable(ssp->clk);
+
+ drv_data->max_clk_rate = clk_get_rate(ssp->clk);
+
+ /* Load default SSP configuration */
+ pxa2xx_spi_write(drv_data, SSCR0, 0);
+ switch (drv_data->ssp_type) {
+ case QUARK_X1000_SSP:
+ tmp = QUARK_X1000_SSCR1_RxTresh(RX_THRESH_QUARK_X1000_DFLT)
+ | QUARK_X1000_SSCR1_TxTresh(TX_THRESH_QUARK_X1000_DFLT);
+ pxa2xx_spi_write(drv_data, SSCR1, tmp);
+
+		/* use the Motorola SPI protocol and an 8-bit frame */
+ pxa2xx_spi_write(drv_data, SSCR0,
+ QUARK_X1000_SSCR0_Motorola
+ | QUARK_X1000_SSCR0_DataSize(8));
+ break;
+ default:
+ tmp = SSCR1_RxTresh(RX_THRESH_DFLT) |
+ SSCR1_TxTresh(TX_THRESH_DFLT);
+ pxa2xx_spi_write(drv_data, SSCR1, tmp);
+ tmp = SSCR0_SCR(2) | SSCR0_Motorola | SSCR0_DataSize(8);
+ pxa2xx_spi_write(drv_data, SSCR0, tmp);
+ break;
+ }
+
+ if (!pxa25x_ssp_comp(drv_data))
+ pxa2xx_spi_write(drv_data, SSTO, 0);
+
+ if (!is_quark_x1000_ssp(drv_data))
+ pxa2xx_spi_write(drv_data, SSPSP, 0);
+
+ if (is_lpss_ssp(drv_data))
+ lpss_ssp_setup(drv_data);
+
+ tasklet_init(&drv_data->pump_transfers, pump_transfers,
+ (unsigned long)drv_data);
+
+ pm_runtime_set_autosuspend_delay(&pdev->dev, 50);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ /* Register with the SPI framework */
+ platform_set_drvdata(pdev, drv_data);
+ status = devm_spi_register_master(&pdev->dev, master);
+ if (status != 0) {
+ dev_err(&pdev->dev, "problem registering spi master\n");
+ goto out_error_clock_enabled;
+ }
+
+ return status;
+
+out_error_clock_enabled:
+ clk_disable_unprepare(ssp->clk);
+ pxa2xx_spi_dma_release(drv_data);
+ free_irq(ssp->irq, drv_data);
+
+out_error_master_alloc:
+ spi_master_put(master);
+ pxa_ssp_free(ssp);
+ return status;
+}
+
+static int pxa2xx_spi_remove(struct platform_device *pdev)
+{
+ struct driver_data *drv_data = platform_get_drvdata(pdev);
+ struct ssp_device *ssp;
+
+ if (!drv_data)
+ return 0;
+ ssp = drv_data->ssp;
+
+ pm_runtime_get_sync(&pdev->dev);
+
+ /* Disable the SSP at the peripheral and SOC level */
+ pxa2xx_spi_write(drv_data, SSCR0, 0);
+ clk_disable_unprepare(ssp->clk);
+
+ /* Release DMA */
+ if (drv_data->master_info->enable_dma)
+ pxa2xx_spi_dma_release(drv_data);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ /* Release IRQ */
+ free_irq(ssp->irq, drv_data);
+
+ /* Release SSP */
+ pxa_ssp_free(ssp);
+
+ return 0;
+}
+
+static void pxa2xx_spi_shutdown(struct platform_device *pdev)
+{
+	int status = pxa2xx_spi_remove(pdev);
+
+	if (status != 0)
+		dev_err(&pdev->dev, "shutdown failed with %d\n", status);
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int pxa2xx_spi_suspend(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+ struct ssp_device *ssp = drv_data->ssp;
+ int status = 0;
+
+ status = spi_master_suspend(drv_data->master);
+ if (status != 0)
+ return status;
+ pxa2xx_spi_write(drv_data, SSCR0, 0);
+
+ if (!pm_runtime_suspended(dev))
+ clk_disable_unprepare(ssp->clk);
+
+ return 0;
+}
+
+static int pxa2xx_spi_resume(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+ struct ssp_device *ssp = drv_data->ssp;
+ int status = 0;
+
+ pxa2xx_spi_dma_resume(drv_data);
+
+ /* Enable the SSP clock */
+ if (!pm_runtime_suspended(dev))
+ clk_prepare_enable(ssp->clk);
+
+ /* Restore LPSS private register bits */
+ if (is_lpss_ssp(drv_data))
+ lpss_ssp_setup(drv_data);
+
+ /* Start the queue running */
+ status = spi_master_resume(drv_data->master);
+ if (status != 0) {
+ dev_err(dev, "problem starting queue (%d)\n", status);
+ return status;
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int pxa2xx_spi_runtime_suspend(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+
+ clk_disable_unprepare(drv_data->ssp->clk);
+ return 0;
+}
+
+static int pxa2xx_spi_runtime_resume(struct device *dev)
+{
+ struct driver_data *drv_data = dev_get_drvdata(dev);
+
+ clk_prepare_enable(drv_data->ssp->clk);
+ return 0;
+}
+#endif
+
+static const struct dev_pm_ops pxa2xx_spi_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pxa2xx_spi_suspend, pxa2xx_spi_resume)
+ SET_RUNTIME_PM_OPS(pxa2xx_spi_runtime_suspend,
+ pxa2xx_spi_runtime_resume, NULL)
+};
+
+static struct platform_driver driver = {
+ .driver = {
+ .name = "pxa2xx-spi",
+ .pm = &pxa2xx_spi_pm_ops,
+ .acpi_match_table = ACPI_PTR(pxa2xx_spi_acpi_match),
+ },
+ .probe = pxa2xx_spi_probe,
+ .remove = pxa2xx_spi_remove,
+ .shutdown = pxa2xx_spi_shutdown,
+};
+
+static int __init pxa2xx_spi_init(void)
+{
+ return platform_driver_register(&driver);
+}
+subsys_initcall(pxa2xx_spi_init);
+
+static void __exit pxa2xx_spi_exit(void)
+{
+ platform_driver_unregister(&driver);
+}
+module_exit(pxa2xx_spi_exit);
diff --git a/drivers/spi/spi-pxa2xx.h b/drivers/spi/spi-pxa2xx.h
new file mode 100644
index 000000000..85a58c906
--- /dev/null
+++ b/drivers/spi/spi-pxa2xx.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ * Copyright (C) 2013, Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef SPI_PXA2XX_H
+#define SPI_PXA2XX_H
+
+#include <linux/atomic.h>
+#include <linux/dmaengine.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pxa2xx_ssp.h>
+#include <linux/scatterlist.h>
+#include <linux/sizes.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/pxa2xx_spi.h>
+
+struct driver_data {
+ /* Driver model hookup */
+ struct platform_device *pdev;
+
+ /* SSP Info */
+ struct ssp_device *ssp;
+
+ /* SPI framework hookup */
+ enum pxa_ssp_type ssp_type;
+ struct spi_master *master;
+
+ /* PXA hookup */
+ struct pxa2xx_spi_master *master_info;
+
+ /* PXA private DMA setup stuff */
+ int rx_channel;
+ int tx_channel;
+ u32 *null_dma_buf;
+
+ /* SSP register addresses */
+ void __iomem *ioaddr;
+ u32 ssdr_physical;
+
+	/* SSP masks */
+ u32 dma_cr1;
+ u32 int_cr1;
+ u32 clear_sr;
+ u32 mask_sr;
+
+	/* Maximum clock rate */
+ unsigned long max_clk_rate;
+
+ /* Message Transfer pump */
+ struct tasklet_struct pump_transfers;
+
+ /* DMA engine support */
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+ struct sg_table rx_sgt;
+ struct sg_table tx_sgt;
+ int rx_nents;
+ int tx_nents;
+ void *dummy;
+ atomic_t dma_running;
+
+ /* Current message transfer state info */
+ struct spi_message *cur_msg;
+ struct spi_transfer *cur_transfer;
+ struct chip_data *cur_chip;
+ size_t len;
+ void *tx;
+ void *tx_end;
+ void *rx;
+ void *rx_end;
+ int dma_mapped;
+ dma_addr_t rx_dma;
+ dma_addr_t tx_dma;
+ size_t rx_map_len;
+ size_t tx_map_len;
+ u8 n_bytes;
+ int (*write)(struct driver_data *drv_data);
+ int (*read)(struct driver_data *drv_data);
+ irqreturn_t (*transfer_handler)(struct driver_data *drv_data);
+ void (*cs_control)(u32 command);
+
+ void __iomem *lpss_base;
+};
+
+struct chip_data {
+ u32 cr0;
+ u32 cr1;
+ u32 dds_rate;
+ u32 psp;
+ u32 timeout;
+ u8 n_bytes;
+ u32 dma_burst_size;
+ u32 threshold;
+ u32 dma_threshold;
+ u16 lpss_rx_threshold;
+ u16 lpss_tx_threshold;
+ u8 enable_dma;
+ u8 bits_per_word;
+ u32 speed_hz;
+ union {
+ int gpio_cs;
+ unsigned int frm;
+ };
+ int gpio_cs_inverted;
+ int (*write)(struct driver_data *drv_data);
+ int (*read)(struct driver_data *drv_data);
+ void (*cs_control)(u32 command);
+};
+
+static inline u32 pxa2xx_spi_read(const struct driver_data *drv_data,
+ unsigned reg)
+{
+ return __raw_readl(drv_data->ioaddr + reg);
+}
+
+static inline void pxa2xx_spi_write(const struct driver_data *drv_data,
+ unsigned reg, u32 val)
+{
+ __raw_writel(val, drv_data->ioaddr + reg);
+}
+
+#define START_STATE ((void *)0)
+#define RUNNING_STATE ((void *)1)
+#define DONE_STATE ((void *)2)
+#define ERROR_STATE ((void *)-1)
+
+#define IS_DMA_ALIGNED(x) IS_ALIGNED((unsigned long)(x), DMA_ALIGNMENT)
+#define DMA_ALIGNMENT 8
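+
+/*
+ * Example (illustrative): with DMA_ALIGNMENT = 8, IS_DMA_ALIGNED(0x1008)
+ * is true while IS_DMA_ALIGNED(0x100a) is false.
+ */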
+
+static inline int pxa25x_ssp_comp(struct driver_data *drv_data)
+{
+ switch (drv_data->ssp_type) {
+ case PXA25x_SSP:
+ case CE4100_SSP:
+ case QUARK_X1000_SSP:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+static inline void write_SSSR_CS(struct driver_data *drv_data, u32 val)
+{
+ if (drv_data->ssp_type == CE4100_SSP ||
+ drv_data->ssp_type == QUARK_X1000_SSP)
+ val |= pxa2xx_spi_read(drv_data, SSSR) & SSSR_ALT_FRM_MASK;
+
+ pxa2xx_spi_write(drv_data, SSSR, val);
+}
+
+extern int pxa2xx_spi_flush(struct driver_data *drv_data);
+extern void *pxa2xx_spi_next_transfer(struct driver_data *drv_data);
+
+/*
+ * Select the right DMA implementation.
+ */
+#if defined(CONFIG_SPI_PXA2XX_PXADMA)
+#define SPI_PXA2XX_USE_DMA 1
+#define MAX_DMA_LEN 8191
+#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TINTE)
+#elif defined(CONFIG_SPI_PXA2XX_DMA)
+#define SPI_PXA2XX_USE_DMA 1
+#define MAX_DMA_LEN SZ_64K
+#define DEFAULT_DMA_CR1 (SSCR1_TSRE | SSCR1_RSRE | SSCR1_TRAIL)
+#else
+#undef SPI_PXA2XX_USE_DMA
+#define MAX_DMA_LEN 0
+#define DEFAULT_DMA_CR1 0
+#endif
+
+#ifdef SPI_PXA2XX_USE_DMA
+extern bool pxa2xx_spi_dma_is_possible(size_t len);
+extern int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data);
+extern irqreturn_t pxa2xx_spi_dma_transfer(struct driver_data *drv_data);
+extern int pxa2xx_spi_dma_prepare(struct driver_data *drv_data, u32 dma_burst);
+extern void pxa2xx_spi_dma_start(struct driver_data *drv_data);
+extern int pxa2xx_spi_dma_setup(struct driver_data *drv_data);
+extern void pxa2xx_spi_dma_release(struct driver_data *drv_data);
+extern void pxa2xx_spi_dma_resume(struct driver_data *drv_data);
+extern int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word,
+ u32 *burst_code,
+ u32 *threshold);
+#else
+static inline bool pxa2xx_spi_dma_is_possible(size_t len) { return false; }
+static inline int pxa2xx_spi_map_dma_buffers(struct driver_data *drv_data)
+{
+ return 0;
+}
+#define pxa2xx_spi_dma_transfer NULL
+static inline void pxa2xx_spi_dma_prepare(struct driver_data *drv_data,
+ u32 dma_burst) {}
+static inline void pxa2xx_spi_dma_start(struct driver_data *drv_data) {}
+static inline int pxa2xx_spi_dma_setup(struct driver_data *drv_data)
+{
+ return 0;
+}
+static inline void pxa2xx_spi_dma_release(struct driver_data *drv_data) {}
+static inline void pxa2xx_spi_dma_resume(struct driver_data *drv_data) {}
+static inline int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
+ struct spi_device *spi,
+ u8 bits_per_word,
+ u32 *burst_code,
+ u32 *threshold)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* SPI_PXA2XX_H */
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
new file mode 100644
index 000000000..810a7fae3
--- /dev/null
+++ b/drivers/spi/spi-qup.c
@@ -0,0 +1,1050 @@
+/*
+ * Copyright (c) 2008-2014, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License rev 2 and
+ * only rev 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+
+#define QUP_CONFIG 0x0000
+#define QUP_STATE 0x0004
+#define QUP_IO_M_MODES 0x0008
+#define QUP_SW_RESET 0x000c
+#define QUP_OPERATIONAL 0x0018
+#define QUP_ERROR_FLAGS 0x001c
+#define QUP_ERROR_FLAGS_EN 0x0020
+#define QUP_OPERATIONAL_MASK 0x0028
+#define QUP_HW_VERSION 0x0030
+#define QUP_MX_OUTPUT_CNT 0x0100
+#define QUP_OUTPUT_FIFO 0x0110
+#define QUP_MX_WRITE_CNT 0x0150
+#define QUP_MX_INPUT_CNT 0x0200
+#define QUP_MX_READ_CNT 0x0208
+#define QUP_INPUT_FIFO 0x0218
+
+#define SPI_CONFIG 0x0300
+#define SPI_IO_CONTROL 0x0304
+#define SPI_ERROR_FLAGS 0x0308
+#define SPI_ERROR_FLAGS_EN 0x030c
+
+/* QUP_CONFIG fields */
+#define QUP_CONFIG_SPI_MODE (1 << 8)
+#define QUP_CONFIG_CLOCK_AUTO_GATE BIT(13)
+#define QUP_CONFIG_NO_INPUT BIT(7)
+#define QUP_CONFIG_NO_OUTPUT BIT(6)
+#define QUP_CONFIG_N 0x001f
+
+/* QUP_STATE fields */
+#define QUP_STATE_VALID BIT(2)
+#define QUP_STATE_RESET 0
+#define QUP_STATE_RUN 1
+#define QUP_STATE_PAUSE 3
+#define QUP_STATE_MASK 3
+#define QUP_STATE_CLEAR 2
+
+#define QUP_HW_VERSION_2_1_1 0x20010001
+
+/* QUP_IO_M_MODES fields */
+#define QUP_IO_M_PACK_EN BIT(15)
+#define QUP_IO_M_UNPACK_EN BIT(14)
+#define QUP_IO_M_INPUT_MODE_MASK_SHIFT 12
+#define QUP_IO_M_OUTPUT_MODE_MASK_SHIFT 10
+#define QUP_IO_M_INPUT_MODE_MASK (3 << QUP_IO_M_INPUT_MODE_MASK_SHIFT)
+#define QUP_IO_M_OUTPUT_MODE_MASK (3 << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT)
+
+#define QUP_IO_M_OUTPUT_BLOCK_SIZE(x) (((x) & (0x03 << 0)) >> 0)
+#define QUP_IO_M_OUTPUT_FIFO_SIZE(x) (((x) & (0x07 << 2)) >> 2)
+#define QUP_IO_M_INPUT_BLOCK_SIZE(x) (((x) & (0x03 << 5)) >> 5)
+#define QUP_IO_M_INPUT_FIFO_SIZE(x) (((x) & (0x07 << 7)) >> 7)
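+
+/*
+ * Decoding example (illustrative): an IO_M_MODES value of 0x0005 (0b101)
+ * yields QUP_IO_M_OUTPUT_BLOCK_SIZE(0x0005) = 1 (bits [1:0]) and
+ * QUP_IO_M_OUTPUT_FIFO_SIZE(0x0005) = 1 (bits [4:2]).
+ */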
+
+#define QUP_IO_M_MODE_FIFO 0
+#define QUP_IO_M_MODE_BLOCK 1
+#define QUP_IO_M_MODE_DMOV 2
+#define QUP_IO_M_MODE_BAM 3
+
+/* QUP_OPERATIONAL fields */
+#define QUP_OP_MAX_INPUT_DONE_FLAG BIT(11)
+#define QUP_OP_MAX_OUTPUT_DONE_FLAG BIT(10)
+#define QUP_OP_IN_SERVICE_FLAG BIT(9)
+#define QUP_OP_OUT_SERVICE_FLAG BIT(8)
+#define QUP_OP_IN_FIFO_FULL BIT(7)
+#define QUP_OP_OUT_FIFO_FULL BIT(6)
+#define QUP_OP_IN_FIFO_NOT_EMPTY BIT(5)
+#define QUP_OP_OUT_FIFO_NOT_EMPTY BIT(4)
+
+/* QUP_ERROR_FLAGS and QUP_ERROR_FLAGS_EN fields */
+#define QUP_ERROR_OUTPUT_OVER_RUN BIT(5)
+#define QUP_ERROR_INPUT_UNDER_RUN BIT(4)
+#define QUP_ERROR_OUTPUT_UNDER_RUN BIT(3)
+#define QUP_ERROR_INPUT_OVER_RUN BIT(2)
+
+/* SPI_CONFIG fields */
+#define SPI_CONFIG_HS_MODE BIT(10)
+#define SPI_CONFIG_INPUT_FIRST BIT(9)
+#define SPI_CONFIG_LOOPBACK BIT(8)
+
+/* SPI_IO_CONTROL fields */
+#define SPI_IO_C_FORCE_CS BIT(11)
+#define SPI_IO_C_CLK_IDLE_HIGH BIT(10)
+#define SPI_IO_C_MX_CS_MODE BIT(8)
+#define SPI_IO_C_CS_N_POLARITY_0 BIT(4)
+#define SPI_IO_C_CS_SELECT(x) (((x) & 3) << 2)
+#define SPI_IO_C_CS_SELECT_MASK 0x000c
+#define SPI_IO_C_TRISTATE_CS BIT(1)
+#define SPI_IO_C_NO_TRI_STATE BIT(0)
+
+/* SPI_ERROR_FLAGS and SPI_ERROR_FLAGS_EN fields */
+#define SPI_ERROR_CLK_OVER_RUN BIT(1)
+#define SPI_ERROR_CLK_UNDER_RUN BIT(0)
+
+#define SPI_NUM_CHIPSELECTS 4
+
+#define SPI_MAX_DMA_XFER (SZ_64K - 64)
+
+/* high speed mode is when the bus rate is greater than 26 MHz */
+#define SPI_HS_MIN_RATE 26000000
+#define SPI_MAX_RATE 50000000
+
+#define SPI_DELAY_THRESHOLD 1
+#define SPI_DELAY_RETRY 10
+
+struct spi_qup {
+ void __iomem *base;
+ struct device *dev;
+ struct clk *cclk; /* core clock */
+ struct clk *iclk; /* interface clock */
+ int irq;
+ spinlock_t lock;
+
+ int in_fifo_sz;
+ int out_fifo_sz;
+ int in_blk_sz;
+ int out_blk_sz;
+
+ struct spi_transfer *xfer;
+ struct completion done;
+ int error;
+ int w_size; /* bytes per SPI word */
+ int n_words;
+ int tx_bytes;
+ int rx_bytes;
+ int qup_v1;
+
+ int use_dma;
+ struct dma_slave_config rx_conf;
+ struct dma_slave_config tx_conf;
+};
+
+static inline bool spi_qup_is_valid_state(struct spi_qup *controller)
+{
+ u32 opstate = readl_relaxed(controller->base + QUP_STATE);
+
+ return opstate & QUP_STATE_VALID;
+}
+
+static int spi_qup_set_state(struct spi_qup *controller, u32 state)
+{
+ unsigned long loop;
+ u32 cur_state;
+
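+ /*
+ * The QUP state machine must report a valid state both before a
+ * transition is requested and after it has taken effect.
+ */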
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ if (loop)
+ dev_dbg(controller->dev, "invalid state for %ld us, requested state %d\n",
+ loop, state);
+
+ cur_state = readl_relaxed(controller->base + QUP_STATE);
+ /*
+ * Per spec: for PAUSE_STATE to RESET_STATE, two writes
+ * of (b10) are required
+ */
+ if (((cur_state & QUP_STATE_MASK) == QUP_STATE_PAUSE) &&
+ (state == QUP_STATE_RESET)) {
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ writel_relaxed(QUP_STATE_CLEAR, controller->base + QUP_STATE);
+ } else {
+ cur_state &= ~QUP_STATE_MASK;
+ cur_state |= state;
+ writel_relaxed(cur_state, controller->base + QUP_STATE);
+ }
+
+ loop = 0;
+ while (!spi_qup_is_valid_state(controller)) {
+
+ usleep_range(SPI_DELAY_THRESHOLD, SPI_DELAY_THRESHOLD * 2);
+
+ if (++loop > SPI_DELAY_RETRY)
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void spi_qup_fifo_read(struct spi_qup *controller,
+ struct spi_transfer *xfer)
+{
+ u8 *rx_buf = xfer->rx_buf;
+ u32 word, state;
+ int idx, shift, w_size;
+
+ w_size = controller->w_size;
+
+ while (controller->rx_bytes < xfer->len) {
+
+ state = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (!(state & QUP_OP_IN_FIFO_NOT_EMPTY))
+ break;
+
+ word = readl_relaxed(controller->base + QUP_INPUT_FIFO);
+
+ if (!rx_buf) {
+ controller->rx_bytes += w_size;
+ continue;
+ }
+
+ for (idx = 0; idx < w_size; idx++, controller->rx_bytes++) {
+ /*
+ * The data format depends on bytes per SPI word:
+ * 4 bytes: 0x12345678
+ * 2 bytes: 0x00001234
+ * 1 byte : 0x00000012
+ */
+ shift = BITS_PER_BYTE;
+ shift *= (w_size - idx - 1);
+ rx_buf[controller->rx_bytes] = word >> shift;
+ }
+ }
+}
+
+static void spi_qup_fifo_write(struct spi_qup *controller,
+ struct spi_transfer *xfer)
+{
+ const u8 *tx_buf = xfer->tx_buf;
+ u32 word, state, data;
+ int idx, w_size;
+
+ w_size = controller->w_size;
+
+ while (controller->tx_bytes < xfer->len) {
+
+ state = readl_relaxed(controller->base + QUP_OPERATIONAL);
+ if (state & QUP_OP_OUT_FIFO_FULL)
+ break;
+
+ word = 0;
+ for (idx = 0; idx < w_size; idx++, controller->tx_bytes++) {
+
+ if (!tx_buf) {
+ controller->tx_bytes += w_size;
+ break;
+ }
+
+ data = tx_buf[controller->tx_bytes];
+ word |= data << (BITS_PER_BYTE * (3 - idx));
+ }
+
+ writel_relaxed(word, controller->base + QUP_OUTPUT_FIFO);
+ }
+}
+
+static void spi_qup_dma_done(void *data)
+{
+ struct spi_qup *qup = data;
+
+ complete(&qup->done);
+}
+
+static int spi_qup_prep_sg(struct spi_master *master, struct spi_transfer *xfer,
+ enum dma_transfer_direction dir,
+ dma_async_tx_callback callback)
+{
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ unsigned long flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE;
+ struct dma_async_tx_descriptor *desc;
+ struct scatterlist *sgl;
+ struct dma_chan *chan;
+ dma_cookie_t cookie;
+ unsigned int nents;
+
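+ /* pick the channel and scatterlist that match the transfer direction */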
+ if (dir == DMA_MEM_TO_DEV) {
+ chan = master->dma_tx;
+ nents = xfer->tx_sg.nents;
+ sgl = xfer->tx_sg.sgl;
+ } else {
+ chan = master->dma_rx;
+ nents = xfer->rx_sg.nents;
+ sgl = xfer->rx_sg.sgl;
+ }
+
+ desc = dmaengine_prep_slave_sg(chan, sgl, nents, dir, flags);
+ if (!desc)
+ return -EINVAL;
+
+ desc->callback = callback;
+ desc->callback_param = qup;
+
+ cookie = dmaengine_submit(desc);
+
+ return dma_submit_error(cookie);
+}
+
+static void spi_qup_dma_terminate(struct spi_master *master,
+ struct spi_transfer *xfer)
+{
+ if (xfer->tx_buf)
+ dmaengine_terminate_all(master->dma_tx);
+ if (xfer->rx_buf)
+ dmaengine_terminate_all(master->dma_rx);
+}
+
+static int spi_qup_do_dma(struct spi_master *master, struct spi_transfer *xfer)
+{
+ dma_async_tx_callback rx_done = NULL, tx_done = NULL;
+ int ret;
+
+ if (xfer->rx_buf)
+ rx_done = spi_qup_dma_done;
+ else if (xfer->tx_buf)
+ tx_done = spi_qup_dma_done;
+
+ if (xfer->rx_buf) {
+ ret = spi_qup_prep_sg(master, xfer, DMA_DEV_TO_MEM, rx_done);
+ if (ret)
+ return ret;
+
+ dma_async_issue_pending(master->dma_rx);
+ }
+
+ if (xfer->tx_buf) {
+ ret = spi_qup_prep_sg(master, xfer, DMA_MEM_TO_DEV, tx_done);
+ if (ret)
+ return ret;
+
+ dma_async_issue_pending(master->dma_tx);
+ }
+
+ return 0;
+}
+
+static int spi_qup_do_pio(struct spi_master *master, struct spi_transfer *xfer)
+{
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_qup_set_state(qup, QUP_STATE_RUN);
+ if (ret) {
+ dev_warn(qup->dev, "cannot set RUN state\n");
+ return ret;
+ }
+
+ ret = spi_qup_set_state(qup, QUP_STATE_PAUSE);
+ if (ret) {
+ dev_warn(qup->dev, "cannot set PAUSE state\n");
+ return ret;
+ }
+
+ spi_qup_fifo_write(qup, xfer);
+
+ return 0;
+}
+
+static irqreturn_t spi_qup_qup_irq(int irq, void *dev_id)
+{
+ struct spi_qup *controller = dev_id;
+ struct spi_transfer *xfer;
+ u32 opflags, qup_err, spi_err;
+ unsigned long flags;
+ int error = 0;
+
+ spin_lock_irqsave(&controller->lock, flags);
+ xfer = controller->xfer;
+ controller->xfer = NULL;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
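+ /* read all pending events, then ack them by writing the flags back */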
+ qup_err = readl_relaxed(controller->base + QUP_ERROR_FLAGS);
+ spi_err = readl_relaxed(controller->base + SPI_ERROR_FLAGS);
+ opflags = readl_relaxed(controller->base + QUP_OPERATIONAL);
+
+ writel_relaxed(qup_err, controller->base + QUP_ERROR_FLAGS);
+ writel_relaxed(spi_err, controller->base + SPI_ERROR_FLAGS);
+ writel_relaxed(opflags, controller->base + QUP_OPERATIONAL);
+
+ if (!xfer) {
+ dev_err_ratelimited(controller->dev, "unexpected irq %08x %08x %08x\n",
+ qup_err, spi_err, opflags);
+ return IRQ_HANDLED;
+ }
+
+ if (qup_err) {
+ if (qup_err & QUP_ERROR_OUTPUT_OVER_RUN)
+ dev_warn(controller->dev, "OUTPUT_OVER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_UNDER_RUN)
+ dev_warn(controller->dev, "INPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_OUTPUT_UNDER_RUN)
+ dev_warn(controller->dev, "OUTPUT_UNDER_RUN\n");
+ if (qup_err & QUP_ERROR_INPUT_OVER_RUN)
+ dev_warn(controller->dev, "INPUT_OVER_RUN\n");
+
+ error = -EIO;
+ }
+
+ if (spi_err) {
+ if (spi_err & SPI_ERROR_CLK_OVER_RUN)
+ dev_warn(controller->dev, "CLK_OVER_RUN\n");
+ if (spi_err & SPI_ERROR_CLK_UNDER_RUN)
+ dev_warn(controller->dev, "CLK_UNDER_RUN\n");
+
+ error = -EIO;
+ }
+
+ if (!controller->use_dma) {
+ if (opflags & QUP_OP_IN_SERVICE_FLAG)
+ spi_qup_fifo_read(controller, xfer);
+
+ if (opflags & QUP_OP_OUT_SERVICE_FLAG)
+ spi_qup_fifo_write(controller, xfer);
+ }
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->error = error;
+ controller->xfer = xfer;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (controller->rx_bytes == xfer->len || error)
+ complete(&controller->done);
+
+ return IRQ_HANDLED;
+}
+
+static u32
+spi_qup_get_mode(struct spi_master *master, struct spi_transfer *xfer)
+{
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ u32 mode;
+
+ qup->w_size = 4;
+
+ if (xfer->bits_per_word <= 8)
+ qup->w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ qup->w_size = 2;
+
+ qup->n_words = xfer->len / qup->w_size;
+
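+ /* transfers that fit entirely in the input FIFO can run in FIFO mode */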
+ if (qup->n_words <= (qup->in_fifo_sz / sizeof(u32)))
+ mode = QUP_IO_M_MODE_FIFO;
+ else
+ mode = QUP_IO_M_MODE_BLOCK;
+
+ return mode;
+}
+
+/* set clock frequency, I/O mode, and bits per word for this transfer */
+static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(spi->master);
+ u32 config, iomode, mode, control;
+ int ret, n_words;
+
+ if ((spi->mode & SPI_LOOP) && xfer->len > controller->in_fifo_sz) {
+ dev_err(controller->dev, "loopback transfer too large: %d > %d\n",
+ xfer->len, controller->in_fifo_sz);
+ return -EIO;
+ }
+
+ ret = clk_set_rate(controller->cclk, xfer->speed_hz);
+ if (ret) {
+ dev_err(controller->dev, "fail to set frequency %d",
+ xfer->speed_hz);
+ return -EIO;
+ }
+
+ if (spi_qup_set_state(controller, QUP_STATE_RESET)) {
+ dev_err(controller->dev, "cannot set RESET state\n");
+ return -EIO;
+ }
+
+ mode = spi_qup_get_mode(spi->master, xfer);
+ n_words = controller->n_words;
+
+ if (mode == QUP_IO_M_MODE_FIFO) {
+ writel_relaxed(n_words, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(n_words, controller->base + QUP_MX_WRITE_CNT);
+ /* must be zero for FIFO */
+ writel_relaxed(0, controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
+ } else if (!controller->use_dma) {
+ writel_relaxed(n_words, controller->base + QUP_MX_INPUT_CNT);
+ writel_relaxed(n_words, controller->base + QUP_MX_OUTPUT_CNT);
+ /* must be zero for BLOCK and BAM */
+ writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+ } else {
+ mode = QUP_IO_M_MODE_BAM;
+ writel_relaxed(0, controller->base + QUP_MX_READ_CNT);
+ writel_relaxed(0, controller->base + QUP_MX_WRITE_CNT);
+
+ if (!controller->qup_v1) {
+ void __iomem *input_cnt;
+
+ input_cnt = controller->base + QUP_MX_INPUT_CNT;
+ /*
+ * for DMA transfers, both QUP_MX_INPUT_CNT and
+ * QUP_MX_OUTPUT_CNT must be zero in all cases but one:
+ * an unbalanced transfer where there is only an rx_buf.
+ */
+ if (xfer->tx_buf)
+ writel_relaxed(0, input_cnt);
+ else
+ writel_relaxed(n_words, input_cnt);
+
+ writel_relaxed(0, controller->base + QUP_MX_OUTPUT_CNT);
+ }
+ }
+
+ iomode = readl_relaxed(controller->base + QUP_IO_M_MODES);
+ /* Set input and output transfer mode */
+ iomode &= ~(QUP_IO_M_INPUT_MODE_MASK | QUP_IO_M_OUTPUT_MODE_MASK);
+
+ if (!controller->use_dma)
+ iomode &= ~(QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN);
+ else
+ iomode |= QUP_IO_M_PACK_EN | QUP_IO_M_UNPACK_EN;
+
+ iomode |= (mode << QUP_IO_M_OUTPUT_MODE_MASK_SHIFT);
+ iomode |= (mode << QUP_IO_M_INPUT_MODE_MASK_SHIFT);
+
+ writel_relaxed(iomode, controller->base + QUP_IO_M_MODES);
+
+ control = readl_relaxed(controller->base + SPI_IO_CONTROL);
+
+ if (spi->mode & SPI_CPOL)
+ control |= SPI_IO_C_CLK_IDLE_HIGH;
+ else
+ control &= ~SPI_IO_C_CLK_IDLE_HIGH;
+
+ writel_relaxed(control, controller->base + SPI_IO_CONTROL);
+
+ config = readl_relaxed(controller->base + SPI_CONFIG);
+
+ if (spi->mode & SPI_LOOP)
+ config |= SPI_CONFIG_LOOPBACK;
+ else
+ config &= ~SPI_CONFIG_LOOPBACK;
+
+ if (spi->mode & SPI_CPHA)
+ config &= ~SPI_CONFIG_INPUT_FIRST;
+ else
+ config |= SPI_CONFIG_INPUT_FIRST;
+
+ /*
+ * HS_MODE improves signal stability at high spi-clk rates,
+ * but is invalid in loopback mode.
+ */
+ if ((xfer->speed_hz >= SPI_HS_MIN_RATE) && !(spi->mode & SPI_LOOP))
+ config |= SPI_CONFIG_HS_MODE;
+ else
+ config &= ~SPI_CONFIG_HS_MODE;
+
+ writel_relaxed(config, controller->base + SPI_CONFIG);
+
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~(QUP_CONFIG_NO_INPUT | QUP_CONFIG_NO_OUTPUT | QUP_CONFIG_N);
+ config |= xfer->bits_per_word - 1;
+ config |= QUP_CONFIG_SPI_MODE;
+
+ if (controller->use_dma) {
+ if (!xfer->tx_buf)
+ config |= QUP_CONFIG_NO_OUTPUT;
+ if (!xfer->rx_buf)
+ config |= QUP_CONFIG_NO_INPUT;
+ }
+
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+
+ /* only write to OPERATIONAL_MASK when the register is present (v2+) */
+ if (!controller->qup_v1) {
+ u32 mask = 0;
+
+ /*
+ * mask INPUT and OUTPUT service flags to prevent IRQs on FIFO
+ * status change in BAM mode
+ */
+
+ if (mode == QUP_IO_M_MODE_BAM)
+ mask = QUP_OP_IN_SERVICE_FLAG | QUP_OP_OUT_SERVICE_FLAG;
+
+ writel_relaxed(mask, controller->base + QUP_OPERATIONAL_MASK);
+ }
+
+ return 0;
+}
+
+static int spi_qup_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ unsigned long timeout, flags;
+ int ret = -EIO;
+
+ ret = spi_qup_io_config(spi, xfer);
+ if (ret)
+ return ret;
+
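+ /*
+ * Estimate the transfer time: xfer->len * 8 bits at speed_hz bits
+ * per second, with a generous 100x margin for setup overhead.
+ */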
+ timeout = DIV_ROUND_UP(xfer->speed_hz, MSEC_PER_SEC);
+ timeout = DIV_ROUND_UP(xfer->len * 8, timeout);
+ timeout = 100 * msecs_to_jiffies(timeout);
+
+ reinit_completion(&controller->done);
+
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = xfer;
+ controller->error = 0;
+ controller->rx_bytes = 0;
+ controller->tx_bytes = 0;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (controller->use_dma)
+ ret = spi_qup_do_dma(master, xfer);
+ else
+ ret = spi_qup_do_pio(master, xfer);
+
+ if (ret)
+ goto exit;
+
+ if (spi_qup_set_state(controller, QUP_STATE_RUN)) {
+ dev_warn(controller->dev, "cannot set EXECUTE state\n");
+ goto exit;
+ }
+
+ if (!wait_for_completion_timeout(&controller->done, timeout))
+ ret = -ETIMEDOUT;
+
+exit:
+ spi_qup_set_state(controller, QUP_STATE_RESET);
+ spin_lock_irqsave(&controller->lock, flags);
+ controller->xfer = NULL;
+ if (!ret)
+ ret = controller->error;
+ spin_unlock_irqrestore(&controller->lock, flags);
+
+ if (ret && controller->use_dma)
+ spi_qup_dma_terminate(master, xfer);
+
+ return ret;
+}
+
+static bool spi_qup_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct spi_qup *qup = spi_master_get_devdata(master);
+ size_t dma_align = dma_get_cache_alignment();
+ u32 mode;
+
+ qup->use_dma = 0;
+
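+ /*
+ * DMA needs a channel, cache-aligned buffers, and a length that is
+ * a multiple of the block size; otherwise fall back to PIO.
+ */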
+ if (xfer->rx_buf && (xfer->len % qup->in_blk_sz ||
+ IS_ERR_OR_NULL(master->dma_rx) ||
+ !IS_ALIGNED((size_t)xfer->rx_buf, dma_align)))
+ return false;
+
+ if (xfer->tx_buf && (xfer->len % qup->out_blk_sz ||
+ IS_ERR_OR_NULL(master->dma_tx) ||
+ !IS_ALIGNED((size_t)xfer->tx_buf, dma_align)))
+ return false;
+
+ mode = spi_qup_get_mode(master, xfer);
+ if (mode == QUP_IO_M_MODE_FIFO)
+ return false;
+
+ qup->use_dma = 1;
+
+ return true;
+}
+
+static void spi_qup_release_dma(struct spi_master *master)
+{
+ if (!IS_ERR_OR_NULL(master->dma_rx))
+ dma_release_channel(master->dma_rx);
+ if (!IS_ERR_OR_NULL(master->dma_tx))
+ dma_release_channel(master->dma_tx);
+}
+
+static int spi_qup_init_dma(struct spi_master *master, resource_size_t base)
+{
+ struct spi_qup *spi = spi_master_get_devdata(master);
+ struct dma_slave_config *rx_conf = &spi->rx_conf,
+ *tx_conf = &spi->tx_conf;
+ struct device *dev = spi->dev;
+ int ret;
+
+ /* allocate dma resources, if available */
+ master->dma_rx = dma_request_slave_channel_reason(dev, "rx");
+ if (IS_ERR(master->dma_rx))
+ return PTR_ERR(master->dma_rx);
+
+ master->dma_tx = dma_request_slave_channel_reason(dev, "tx");
+ if (IS_ERR(master->dma_tx)) {
+ ret = PTR_ERR(master->dma_tx);
+ goto err_tx;
+ }
+
+ /* set DMA parameters */
+ rx_conf->direction = DMA_DEV_TO_MEM;
+ rx_conf->device_fc = 1;
+ rx_conf->src_addr = base + QUP_INPUT_FIFO;
+ rx_conf->src_maxburst = spi->in_blk_sz;
+
+ tx_conf->direction = DMA_MEM_TO_DEV;
+ tx_conf->device_fc = 1;
+ tx_conf->dst_addr = base + QUP_OUTPUT_FIFO;
+ tx_conf->dst_maxburst = spi->out_blk_sz;
+
+ ret = dmaengine_slave_config(master->dma_rx, rx_conf);
+ if (ret) {
+ dev_err(dev, "failed to configure RX channel\n");
+ goto err;
+ }
+
+ ret = dmaengine_slave_config(master->dma_tx, tx_conf);
+ if (ret) {
+ dev_err(dev, "failed to configure TX channel\n");
+ goto err;
+ }
+
+ return 0;
+
+err:
+ dma_release_channel(master->dma_tx);
+err_tx:
+ dma_release_channel(master->dma_rx);
+ return ret;
+}
+
+static int spi_qup_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct clk *iclk, *cclk;
+ struct spi_qup *controller;
+ struct resource *res;
+ struct device *dev;
+ void __iomem *base;
+ u32 max_freq, iomode, num_cs;
+ int ret, irq, size;
+
+ dev = &pdev->dev;
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return irq;
+
+ cclk = devm_clk_get(dev, "core");
+ if (IS_ERR(cclk))
+ return PTR_ERR(cclk);
+
+ iclk = devm_clk_get(dev, "iface");
+ if (IS_ERR(iclk))
+ return PTR_ERR(iclk);
+
+ /* spi-max-frequency is an optional property */
+ if (of_property_read_u32(dev->of_node, "spi-max-frequency", &max_freq))
+ max_freq = SPI_MAX_RATE;
+
+ if (!max_freq || max_freq > SPI_MAX_RATE) {
+ dev_err(dev, "invalid clock frequency %d\n", max_freq);
+ return -ENXIO;
+ }
+
+ ret = clk_prepare_enable(cclk);
+ if (ret) {
+ dev_err(dev, "cannot enable core clock\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(iclk);
+ if (ret) {
+ clk_disable_unprepare(cclk);
+ dev_err(dev, "cannot enable iface clock\n");
+ return ret;
+ }
+
+ master = spi_alloc_master(dev, sizeof(struct spi_qup));
+ if (!master) {
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ dev_err(dev, "cannot allocate master\n");
+ return -ENOMEM;
+ }
+
+ /* use the num-cs property unless it is absent or out of range */
+ if (of_property_read_u32(dev->of_node, "num-cs", &num_cs) ||
+ num_cs > SPI_NUM_CHIPSELECTS)
+ master->num_chipselect = SPI_NUM_CHIPSELECTS;
+ else
+ master->num_chipselect = num_cs;
+
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
+ master->max_speed_hz = max_freq;
+ master->transfer_one = spi_qup_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+ master->dma_alignment = dma_get_cache_alignment();
+ master->max_dma_len = SPI_MAX_DMA_XFER;
+
+ platform_set_drvdata(pdev, master);
+
+ controller = spi_master_get_devdata(master);
+
+ controller->dev = dev;
+ controller->base = base;
+ controller->iclk = iclk;
+ controller->cclk = cclk;
+ controller->irq = irq;
+
+ ret = spi_qup_init_dma(master, res->start);
+ if (ret == -EPROBE_DEFER)
+ goto error;
+ else if (!ret)
+ master->can_dma = spi_qup_can_dma;
+
+ /* set v1 flag if device is version 1 */
+ if (of_device_is_compatible(dev->of_node, "qcom,spi-qup-v1.1.1"))
+ controller->qup_v1 = 1;
+
+ spin_lock_init(&controller->lock);
+ init_completion(&controller->done);
+
+ iomode = readl_relaxed(base + QUP_IO_M_MODES);
+
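+ /*
+ * The hardware encodes block sizes in units of 16 bytes (0 means
+ * 4 bytes) and FIFO depths as a power-of-two multiple of the
+ * block size.
+ */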
+ size = QUP_IO_M_OUTPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->out_blk_sz = size * 16;
+ else
+ controller->out_blk_sz = 4;
+
+ size = QUP_IO_M_INPUT_BLOCK_SIZE(iomode);
+ if (size)
+ controller->in_blk_sz = size * 16;
+ else
+ controller->in_blk_sz = 4;
+
+ size = QUP_IO_M_OUTPUT_FIFO_SIZE(iomode);
+ controller->out_fifo_sz = controller->out_blk_sz * (2 << size);
+
+ size = QUP_IO_M_INPUT_FIFO_SIZE(iomode);
+ controller->in_fifo_sz = controller->in_blk_sz * (2 << size);
+
+ dev_info(dev, "IN:block:%d, fifo:%d, OUT:block:%d, fifo:%d\n",
+ controller->in_blk_sz, controller->in_fifo_sz,
+ controller->out_blk_sz, controller->out_fifo_sz);
+
+ writel_relaxed(1, base + QUP_SW_RESET);
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret) {
+ dev_err(dev, "cannot set RESET state\n");
+ goto error_dma;
+ }
+
+ writel_relaxed(0, base + QUP_OPERATIONAL);
+ writel_relaxed(0, base + QUP_IO_M_MODES);
+
+ if (!controller->qup_v1)
+ writel_relaxed(0, base + QUP_OPERATIONAL_MASK);
+
+ writel_relaxed(SPI_ERROR_CLK_UNDER_RUN | SPI_ERROR_CLK_OVER_RUN,
+ base + SPI_ERROR_FLAGS_EN);
+
+ /* on earlier QUP versions (v1), enable all errors except INPUT_OVER_RUN */
+ if (controller->qup_v1)
+ writel_relaxed(QUP_ERROR_OUTPUT_OVER_RUN |
+ QUP_ERROR_INPUT_UNDER_RUN | QUP_ERROR_OUTPUT_UNDER_RUN,
+ base + QUP_ERROR_FLAGS_EN);
+
+ writel_relaxed(0, base + SPI_CONFIG);
+ writel_relaxed(SPI_IO_C_NO_TRI_STATE, base + SPI_IO_CONTROL);
+
+ ret = devm_request_irq(dev, irq, spi_qup_qup_irq,
+ IRQF_TRIGGER_HIGH, pdev->name, controller);
+ if (ret)
+ goto error_dma;
+
+ pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
+ pm_runtime_use_autosuspend(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ ret = devm_spi_register_master(dev, master);
+ if (ret)
+ goto disable_pm;
+
+ return 0;
+
+disable_pm:
+ pm_runtime_disable(&pdev->dev);
+error_dma:
+ spi_qup_release_dma(master);
+error:
+ clk_disable_unprepare(cclk);
+ clk_disable_unprepare(iclk);
+ spi_master_put(master);
+ return ret;
+}
+
+#ifdef CONFIG_PM
+static int spi_qup_pm_suspend_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+
+ /* Enable clock auto-gating */
+ config = readl(controller->base + QUP_CONFIG);
+ config |= QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+ return 0;
+}
+
+static int spi_qup_pm_resume_runtime(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ u32 config;
+
+ /* Disable clock auto-gating */
+ config = readl_relaxed(controller->base + QUP_CONFIG);
+ config &= ~QUP_CONFIG_CLOCK_AUTO_GATE;
+ writel_relaxed(config, controller->base + QUP_CONFIG);
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_qup_suspend(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+ return 0;
+}
+
+static int spi_qup_resume(struct device *device)
+{
+ struct spi_master *master = dev_get_drvdata(device);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(controller->iclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(controller->cclk);
+ if (ret)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int spi_qup_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = dev_get_drvdata(&pdev->dev);
+ struct spi_qup *controller = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0)
+ return ret;
+
+ ret = spi_qup_set_state(controller, QUP_STATE_RESET);
+ if (ret)
+ return ret;
+
+ spi_qup_release_dma(master);
+
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
+
+ pm_runtime_put_noidle(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static const struct of_device_id spi_qup_dt_match[] = {
+ { .compatible = "qcom,spi-qup-v1.1.1", },
+ { .compatible = "qcom,spi-qup-v2.1.1", },
+ { .compatible = "qcom,spi-qup-v2.2.1", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, spi_qup_dt_match);
+
+static const struct dev_pm_ops spi_qup_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(spi_qup_suspend, spi_qup_resume)
+ SET_RUNTIME_PM_OPS(spi_qup_pm_suspend_runtime,
+ spi_qup_pm_resume_runtime,
+ NULL)
+};
+
+static struct platform_driver spi_qup_driver = {
+ .driver = {
+ .name = "spi_qup",
+ .pm = &spi_qup_dev_pm_ops,
+ .of_match_table = spi_qup_dt_match,
+ },
+ .probe = spi_qup_probe,
+ .remove = spi_qup_remove,
+};
+module_platform_driver(spi_qup_driver);
+
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spi_qup");
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
new file mode 100644
index 000000000..68e7efeb9
--- /dev/null
+++ b/drivers/spi/spi-rockchip.c
@@ -0,0 +1,890 @@
+/*
+ * Copyright (c) 2014, Fuzhou Rockchip Electronics Co., Ltd
+ * Author: Addy Ke <addy.ke@rock-chips.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/spi/spi.h>
+#include <linux/scatterlist.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/io.h>
+#include <linux/dmaengine.h>
+
+#define DRIVER_NAME "rockchip-spi"
+
+/* SPI register offsets */
+#define ROCKCHIP_SPI_CTRLR0 0x0000
+#define ROCKCHIP_SPI_CTRLR1 0x0004
+#define ROCKCHIP_SPI_SSIENR 0x0008
+#define ROCKCHIP_SPI_SER 0x000c
+#define ROCKCHIP_SPI_BAUDR 0x0010
+#define ROCKCHIP_SPI_TXFTLR 0x0014
+#define ROCKCHIP_SPI_RXFTLR 0x0018
+#define ROCKCHIP_SPI_TXFLR 0x001c
+#define ROCKCHIP_SPI_RXFLR 0x0020
+#define ROCKCHIP_SPI_SR 0x0024
+#define ROCKCHIP_SPI_IPR 0x0028
+#define ROCKCHIP_SPI_IMR 0x002c
+#define ROCKCHIP_SPI_ISR 0x0030
+#define ROCKCHIP_SPI_RISR 0x0034
+#define ROCKCHIP_SPI_ICR 0x0038
+#define ROCKCHIP_SPI_DMACR 0x003c
+#define ROCKCHIP_SPI_DMATDLR 0x0040
+#define ROCKCHIP_SPI_DMARDLR 0x0044
+#define ROCKCHIP_SPI_TXDR 0x0400
+#define ROCKCHIP_SPI_RXDR 0x0800
+
+/* Bit fields in CTRLR0 */
+#define CR0_DFS_OFFSET 0
+
+#define CR0_CFS_OFFSET 2
+
+#define CR0_SCPH_OFFSET 6
+
+#define CR0_SCPOL_OFFSET 7
+
+#define CR0_CSM_OFFSET 8
+#define CR0_CSM_KEEP 0x0
+/* ss_n stays high for half an sclk_out cycle */
+#define CR0_CSM_HALF 0x1
+/* ss_n stays high for one sclk_out cycle */
+#define CR0_CSM_ONE 0x2
+
+/* ss_n to sclk_out delay */
+#define CR0_SSD_OFFSET 10
+/*
+ * The period between ss_n active and
+ * sclk_out active is half an sclk_out cycle
+ */
+#define CR0_SSD_HALF 0x0
+/*
+ * The period between ss_n active and
+ * sclk_out active is one sclk_out cycle
+ */
+#define CR0_SSD_ONE 0x1
+
+#define CR0_EM_OFFSET 11
+#define CR0_EM_LITTLE 0x0
+#define CR0_EM_BIG 0x1
+
+#define CR0_FBM_OFFSET 12
+#define CR0_FBM_MSB 0x0
+#define CR0_FBM_LSB 0x1
+
+#define CR0_BHT_OFFSET 13
+#define CR0_BHT_16BIT 0x0
+#define CR0_BHT_8BIT 0x1
+
+#define CR0_RSD_OFFSET 14
+
+#define CR0_FRF_OFFSET 16
+#define CR0_FRF_SPI 0x0
+#define CR0_FRF_SSP 0x1
+#define CR0_FRF_MICROWIRE 0x2
+
+#define CR0_XFM_OFFSET 18
+#define CR0_XFM_MASK (0x03 << SPI_XFM_OFFSET)
+#define CR0_XFM_TR 0x0
+#define CR0_XFM_TO 0x1
+#define CR0_XFM_RO 0x2
+
+#define CR0_OPM_OFFSET 20
+#define CR0_OPM_MASTER 0x0
+#define CR0_OPM_SLAVE 0x1
+
+#define CR0_MTM_OFFSET 0x21
+
+/* Bit fields in SER, 2bit */
+#define SER_MASK 0x3
+
+/* Bit fields in SR, 5bit */
+#define SR_MASK 0x1f
+#define SR_BUSY (1 << 0)
+#define SR_TF_FULL (1 << 1)
+#define SR_TF_EMPTY (1 << 2)
+#define SR_RF_EMPTY (1 << 3)
+#define SR_RF_FULL (1 << 4)
+
+/* Bit fields in ISR, IMR, ISR, RISR, 5bit */
+#define INT_MASK 0x1f
+#define INT_TF_EMPTY (1 << 0)
+#define INT_TF_OVERFLOW (1 << 1)
+#define INT_RF_UNDERFLOW (1 << 2)
+#define INT_RF_OVERFLOW (1 << 3)
+#define INT_RF_FULL (1 << 4)
+
+/* Bit fields in ICR, 4bit */
+#define ICR_MASK 0x0f
+#define ICR_ALL (1 << 0)
+#define ICR_RF_UNDERFLOW (1 << 1)
+#define ICR_RF_OVERFLOW (1 << 2)
+#define ICR_TF_OVERFLOW (1 << 3)
+
+/* Bit fields in DMACR */
+#define RF_DMA_EN (1 << 0)
+#define TF_DMA_EN (1 << 1)
+
+#define RXBUSY (1 << 0)
+#define TXBUSY (1 << 1)
+
+/* sclk_out: the SPI master logic in RK3x supports up to 50 MHz */
+#define MAX_SCLK_OUT 50000000
+
+enum rockchip_ssi_type {
+ SSI_MOTO_SPI = 0,
+ SSI_TI_SSP,
+ SSI_NS_MICROWIRE,
+};
+
+struct rockchip_spi_dma_data {
+ struct dma_chan *ch;
+ enum dma_transfer_direction direction;
+ dma_addr_t addr;
+};
+
+struct rockchip_spi {
+ struct device *dev;
+ struct spi_master *master;
+
+ struct clk *spiclk;
+ struct clk *apb_pclk;
+
+ void __iomem *regs;
+ /* depth of the FIFO buffer */
+ u32 fifo_len;
+ /* max bus freq supported */
+ u32 max_freq;
+ /* SSI protocol type */
+ enum rockchip_ssi_type type;
+
+ u16 mode;
+ u8 tmode;
+ u8 bpw;
+ u8 n_bytes;
+ u8 rsd_nsecs;
+ unsigned len;
+ u32 speed;
+
+ const void *tx;
+ const void *tx_end;
+ void *rx;
+ void *rx_end;
+
+ u32 state;
+ /* protect state */
+ spinlock_t lock;
+
+ struct completion xfer_completion;
+
+ u32 use_dma;
+ struct sg_table tx_sg;
+ struct sg_table rx_sg;
+ struct rockchip_spi_dma_data dma_rx;
+ struct rockchip_spi_dma_data dma_tx;
+};
+
+static inline void spi_enable_chip(struct rockchip_spi *rs, int enable)
+{
+ writel_relaxed((enable ? 1 : 0), rs->regs + ROCKCHIP_SPI_SSIENR);
+}
+
+static inline void spi_set_clk(struct rockchip_spi *rs, u16 div)
+{
+ writel_relaxed(div, rs->regs + ROCKCHIP_SPI_BAUDR);
+}
+
+static inline void flush_fifo(struct rockchip_spi *rs)
+{
+ while (readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR))
+ readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+}
+
+static inline void wait_for_idle(struct rockchip_spi *rs)
+{
+ unsigned long timeout = jiffies + msecs_to_jiffies(5);
+
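+ /* poll the BUSY flag; give the controller at most 5 ms to drain */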
+ do {
+ if (!(readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY))
+ return;
+ } while (!time_after(jiffies, timeout));
+
+ dev_warn(rs->dev, "spi controller is in busy state!\n");
+}
+
+static u32 get_fifo_len(struct rockchip_spi *rs)
+{
+ u32 fifo;
+
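+ /*
+ * Probe the FIFO depth: trigger levels beyond the FIFO size do not
+ * read back from TXFTLR, so the first mismatch reveals the depth.
+ */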
+ for (fifo = 2; fifo < 32; fifo++) {
+ writel_relaxed(fifo, rs->regs + ROCKCHIP_SPI_TXFTLR);
+ if (fifo != readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFTLR))
+ break;
+ }
+
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_TXFTLR);
+
+ return (fifo == 31) ? 0 : fifo;
+}
+
+static inline u32 tx_max(struct rockchip_spi *rs)
+{
+ u32 tx_left, tx_room;
+
+ tx_left = (rs->tx_end - rs->tx) / rs->n_bytes;
+ tx_room = rs->fifo_len - readl_relaxed(rs->regs + ROCKCHIP_SPI_TXFLR);
+
+ return min(tx_left, tx_room);
+}
+
+static inline u32 rx_max(struct rockchip_spi *rs)
+{
+ u32 rx_left = (rs->rx_end - rs->rx) / rs->n_bytes;
+ u32 rx_room = (u32)readl_relaxed(rs->regs + ROCKCHIP_SPI_RXFLR);
+
+ return min(rx_left, rx_room);
+}
+
+static void rockchip_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ u32 ser;
+ struct rockchip_spi *rs = spi_master_get_devdata(spi->master);
+
+ ser = readl_relaxed(rs->regs + ROCKCHIP_SPI_SER) & SER_MASK;
+
+ /*
+ * drivers/spi/spi.c:
+ * static void spi_set_cs(struct spi_device *spi, bool enable)
+ * {
+ * if (spi->mode & SPI_CS_HIGH)
+ * enable = !enable;
+ *
+ * if (spi->cs_gpio >= 0)
+ * gpio_set_value(spi->cs_gpio, !enable);
+ * else if (spi->master->set_cs)
+ * spi->master->set_cs(spi, !enable);
+ * }
+ *
+ * Note: enable(rockchip_spi_set_cs) = !enable(spi_set_cs)
+ */
+ if (!enable)
+ ser |= 1 << spi->chip_select;
+ else
+ ser &= ~(1 << spi->chip_select);
+
+ writel_relaxed(ser, rs->regs + ROCKCHIP_SPI_SER);
+}
+
+static int rockchip_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+
+ rs->mode = spi->mode;
+
+ return 0;
+}
+
+static void rockchip_spi_handle_err(struct spi_master *master,
+ struct spi_message *msg)
+{
+ unsigned long flags;
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+ spin_lock_irqsave(&rs->lock, flags);
+
+ /*
+ * In DMA mode, if a DMA transfer times out we must terminate
+ * the DMA channels and flush the FIFO so that the next
+ * transfer starts clean. The core calls handle_err() whenever
+ * a transfer fails, so this is a reasonable place to do it.
+ */
+ if (rs->use_dma) {
+ if (rs->state & RXBUSY) {
+ dmaengine_terminate_all(rs->dma_rx.ch);
+ flush_fifo(rs);
+ }
+
+ if (rs->state & TXBUSY)
+ dmaengine_terminate_all(rs->dma_tx.ch);
+ }
+
+ spin_unlock_irqrestore(&rs->lock, flags);
+}
+
+static int rockchip_spi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+ spi_enable_chip(rs, 0);
+
+ return 0;
+}
+
+static void rockchip_spi_pio_writer(struct rockchip_spi *rs)
+{
+ u32 max = tx_max(rs);
+ u32 txw = 0;
+
+ while (max--) {
+ if (rs->n_bytes == 1)
+ txw = *(u8 *)(rs->tx);
+ else
+ txw = *(u16 *)(rs->tx);
+
+ writel_relaxed(txw, rs->regs + ROCKCHIP_SPI_TXDR);
+ rs->tx += rs->n_bytes;
+ }
+}
+
+static void rockchip_spi_pio_reader(struct rockchip_spi *rs)
+{
+ u32 max = rx_max(rs);
+ u32 rxw;
+
+ while (max--) {
+ rxw = readl_relaxed(rs->regs + ROCKCHIP_SPI_RXDR);
+ if (rs->n_bytes == 1)
+ *(u8 *)(rs->rx) = (u8)rxw;
+ else
+ *(u16 *)(rs->rx) = (u16)rxw;
+ rs->rx += rs->n_bytes;
+ }
+}
+
+static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
+{
+ int remain = 0;
+
+ do {
+ if (rs->tx) {
+ remain = rs->tx_end - rs->tx;
+ rockchip_spi_pio_writer(rs);
+ }
+
+ if (rs->rx) {
+ remain = rs->rx_end - rs->rx;
+ rockchip_spi_pio_reader(rs);
+ }
+
+ cpu_relax();
+ } while (remain);
+
+ /* For tx, wait until the FIFO has drained completely. */
+ if (rs->tx)
+ wait_for_idle(rs);
+
+ spi_enable_chip(rs, 0);
+
+ return 0;
+}
+
+static void rockchip_spi_dma_rxcb(void *data)
+{
+ unsigned long flags;
+ struct rockchip_spi *rs = data;
+
+ spin_lock_irqsave(&rs->lock, flags);
+
+ rs->state &= ~RXBUSY;
+ if (!(rs->state & TXBUSY)) {
+ spi_enable_chip(rs, 0);
+ spi_finalize_current_transfer(rs->master);
+ }
+
+ spin_unlock_irqrestore(&rs->lock, flags);
+}
+
+static void rockchip_spi_dma_txcb(void *data)
+{
+ unsigned long flags;
+ struct rockchip_spi *rs = data;
+
+ /* Wait until the TX FIFO has drained completely. */
+ wait_for_idle(rs);
+
+ spin_lock_irqsave(&rs->lock, flags);
+
+ rs->state &= ~TXBUSY;
+ if (!(rs->state & RXBUSY)) {
+ spi_enable_chip(rs, 0);
+ spi_finalize_current_transfer(rs->master);
+ }
+
+ spin_unlock_irqrestore(&rs->lock, flags);
+}
+
+static void rockchip_spi_prepare_dma(struct rockchip_spi *rs)
+{
+ unsigned long flags;
+ struct dma_slave_config rxconf, txconf;
+ struct dma_async_tx_descriptor *rxdesc, *txdesc;
+
+ spin_lock_irqsave(&rs->lock, flags);
+ rs->state &= ~RXBUSY;
+ rs->state &= ~TXBUSY;
+ spin_unlock_irqrestore(&rs->lock, flags);
+
+ rxdesc = NULL;
+ if (rs->rx) {
+ rxconf.direction = rs->dma_rx.direction;
+ rxconf.src_addr = rs->dma_rx.addr;
+ rxconf.src_addr_width = rs->n_bytes;
+ rxconf.src_maxburst = rs->n_bytes;
+ dmaengine_slave_config(rs->dma_rx.ch, &rxconf);
+
+ rxdesc = dmaengine_prep_slave_sg(
+ rs->dma_rx.ch,
+ rs->rx_sg.sgl, rs->rx_sg.nents,
+ rs->dma_rx.direction, DMA_PREP_INTERRUPT);
+
+ rxdesc->callback = rockchip_spi_dma_rxcb;
+ rxdesc->callback_param = rs;
+ }
+
+ txdesc = NULL;
+ if (rs->tx) {
+ txconf.direction = rs->dma_tx.direction;
+ txconf.dst_addr = rs->dma_tx.addr;
+ txconf.dst_addr_width = rs->n_bytes;
+ txconf.dst_maxburst = rs->n_bytes;
+ dmaengine_slave_config(rs->dma_tx.ch, &txconf);
+
+ txdesc = dmaengine_prep_slave_sg(
+ rs->dma_tx.ch,
+ rs->tx_sg.sgl, rs->tx_sg.nents,
+ rs->dma_tx.direction, DMA_PREP_INTERRUPT);
+
+ txdesc->callback = rockchip_spi_dma_txcb;
+ txdesc->callback_param = rs;
+ }
+
+ /* rx must be started before tx: receive data is clocked in as soon as the transmit side starts shifting */
+ if (rxdesc) {
+ spin_lock_irqsave(&rs->lock, flags);
+ rs->state |= RXBUSY;
+ spin_unlock_irqrestore(&rs->lock, flags);
+ dmaengine_submit(rxdesc);
+ dma_async_issue_pending(rs->dma_rx.ch);
+ }
+
+ if (txdesc) {
+ spin_lock_irqsave(&rs->lock, flags);
+ rs->state |= TXBUSY;
+ spin_unlock_irqrestore(&rs->lock, flags);
+ dmaengine_submit(txdesc);
+ dma_async_issue_pending(rs->dma_tx.ch);
+ }
+}
+
+static void rockchip_spi_config(struct rockchip_spi *rs)
+{
+ u32 div = 0;
+ u32 dmacr = 0;
+ int rsd = 0;
+
+ u32 cr0 = (CR0_BHT_8BIT << CR0_BHT_OFFSET)
+ | (CR0_SSD_ONE << CR0_SSD_OFFSET);
+
+ cr0 |= (rs->n_bytes << CR0_DFS_OFFSET);
+ cr0 |= ((rs->mode & 0x3) << CR0_SCPH_OFFSET);
+ cr0 |= (rs->tmode << CR0_XFM_OFFSET);
+ cr0 |= (rs->type << CR0_FRF_OFFSET);
+
+ if (rs->use_dma) {
+ if (rs->tx)
+ dmacr |= TF_DMA_EN;
+ if (rs->rx)
+ dmacr |= RF_DMA_EN;
+ }
+
+ if (WARN_ON(rs->speed > MAX_SCLK_OUT))
+ rs->speed = MAX_SCLK_OUT;
+
+ /* the minimum divisor is 2 */
+ if (rs->max_freq < 2 * rs->speed) {
+ clk_set_rate(rs->spiclk, 2 * rs->speed);
+ rs->max_freq = clk_get_rate(rs->spiclk);
+ }
+
+ /* the divider does not support odd values, so round up to the next even number */
+ div = DIV_ROUND_UP(rs->max_freq, rs->speed);
+ div = (div + 1) & 0xfffe;
+
+ /* Rx sample delay is expressed in parent clock cycles (max 3) */
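+ /* (both terms are pre-shifted by 8 so the product fits in 32 bits) */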
+ rsd = DIV_ROUND_CLOSEST(rs->rsd_nsecs * (rs->max_freq >> 8),
+ 1000000000 >> 8);
+ if (!rsd && rs->rsd_nsecs) {
+ pr_warn_once("rockchip-spi: %u Hz are too slow to express %u ns delay\n",
+ rs->max_freq, rs->rsd_nsecs);
+ } else if (rsd > 3) {
+ rsd = 3;
+ pr_warn_once("rockchip-spi: %u Hz are too fast to express %u ns delay, clamping at %u ns\n",
+ rs->max_freq, rs->rsd_nsecs,
+ rsd * 1000000000U / rs->max_freq);
+ }
+ cr0 |= rsd << CR0_RSD_OFFSET;
+
+ writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
+
+ writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
+ writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_TXFTLR);
+ writel_relaxed(rs->fifo_len / 2 - 1, rs->regs + ROCKCHIP_SPI_RXFTLR);
+
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMATDLR);
+ writel_relaxed(0, rs->regs + ROCKCHIP_SPI_DMARDLR);
+ writel_relaxed(dmacr, rs->regs + ROCKCHIP_SPI_DMACR);
+
+ spi_set_clk(rs, div);
+
+ dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
+}
+
+static int rockchip_spi_transfer_one(
+ struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ int ret = 1;
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+ WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
+ (readl_relaxed(rs->regs + ROCKCHIP_SPI_SR) & SR_BUSY));
+
+ if (!xfer->tx_buf && !xfer->rx_buf) {
+ dev_err(rs->dev, "No buffer for transfer\n");
+ return -EINVAL;
+ }
+
+ rs->speed = xfer->speed_hz;
+ rs->bpw = xfer->bits_per_word;
+ rs->n_bytes = rs->bpw >> 3;
+
+ rs->tx = xfer->tx_buf;
+ rs->tx_end = rs->tx + xfer->len;
+ rs->rx = xfer->rx_buf;
+ rs->rx_end = rs->rx + xfer->len;
+ rs->len = xfer->len;
+
+ rs->tx_sg = xfer->tx_sg;
+ rs->rx_sg = xfer->rx_sg;
+
+ if (rs->tx && rs->rx)
+ rs->tmode = CR0_XFM_TR;
+ else if (rs->tx)
+ rs->tmode = CR0_XFM_TO;
+ else if (rs->rx)
+ rs->tmode = CR0_XFM_RO;
+
+ /* DMA must be prepared before the controller is enabled */
+ if (master->can_dma && master->can_dma(master, spi, xfer))
+ rs->use_dma = 1;
+ else
+ rs->use_dma = 0;
+
+ rockchip_spi_config(rs);
+
+ if (rs->use_dma) {
+ if (rs->tmode == CR0_XFM_RO) {
+ /* rx: dma must be prepared first */
+ rockchip_spi_prepare_dma(rs);
+ spi_enable_chip(rs, 1);
+ } else {
+ /* tx or tr: spi must be enabled first */
+ spi_enable_chip(rs, 1);
+ rockchip_spi_prepare_dma(rs);
+ }
+ } else {
+ spi_enable_chip(rs, 1);
+ ret = rockchip_spi_pio_transfer(rs);
+ }
+
+ return ret;
+}
+
+static bool rockchip_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+ return (xfer->len > rs->fifo_len);
+}
+
+static int rockchip_spi_probe(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct rockchip_spi *rs;
+ struct spi_master *master;
+ struct resource *mem;
+ u32 rsd_nsecs;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct rockchip_spi));
+ if (!master)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, master);
+
+ rs = spi_master_get_devdata(master);
+ memset(rs, 0, sizeof(struct rockchip_spi));
+
+ /* Get basic io resource and map it */
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rs->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(rs->regs)) {
+ ret = PTR_ERR(rs->regs);
+ goto err_ioremap_resource;
+ }
+
+ rs->apb_pclk = devm_clk_get(&pdev->dev, "apb_pclk");
+ if (IS_ERR(rs->apb_pclk)) {
+ dev_err(&pdev->dev, "Failed to get apb_pclk\n");
+ ret = PTR_ERR(rs->apb_pclk);
+ goto err_ioremap_resource;
+ }
+
+ rs->spiclk = devm_clk_get(&pdev->dev, "spiclk");
+ if (IS_ERR(rs->spiclk)) {
+ dev_err(&pdev->dev, "Failed to get spi_pclk\n");
+ ret = PTR_ERR(rs->spiclk);
+ goto err_ioremap_resource;
+ }
+
+ ret = clk_prepare_enable(rs->apb_pclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable apb_pclk\n");
+ goto err_ioremap_resource;
+ }
+
+ ret = clk_prepare_enable(rs->spiclk);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to enable spi_clk\n");
+ goto err_spiclk_enable;
+ }
+
+ spi_enable_chip(rs, 0);
+
+ rs->type = SSI_MOTO_SPI;
+ rs->master = master;
+ rs->dev = &pdev->dev;
+ rs->max_freq = clk_get_rate(rs->spiclk);
+
+ if (!of_property_read_u32(pdev->dev.of_node, "rx-sample-delay-ns",
+ &rsd_nsecs))
+ rs->rsd_nsecs = rsd_nsecs;
+
+ rs->fifo_len = get_fifo_len(rs);
+ if (!rs->fifo_len) {
+ dev_err(&pdev->dev, "Failed to get fifo length\n");
+ ret = -EINVAL;
+ goto err_get_fifo_len;
+ }
+
+ spin_lock_init(&rs->lock);
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ master->auto_runtime_pm = true;
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LOOP;
+ master->num_chipselect = 2;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bits_per_word_mask = SPI_BPW_MASK(16) | SPI_BPW_MASK(8);
+
+ master->set_cs = rockchip_spi_set_cs;
+ master->prepare_message = rockchip_spi_prepare_message;
+ master->unprepare_message = rockchip_spi_unprepare_message;
+ master->transfer_one = rockchip_spi_transfer_one;
+ master->handle_err = rockchip_spi_handle_err;
+
+ rs->dma_tx.ch = dma_request_slave_channel(rs->dev, "tx");
+ if (!rs->dma_tx.ch)
+ dev_warn(rs->dev, "Failed to request TX DMA channel\n");
+
+ rs->dma_rx.ch = dma_request_slave_channel(rs->dev, "rx");
+ if (!rs->dma_rx.ch) {
+ if (rs->dma_tx.ch) {
+ dma_release_channel(rs->dma_tx.ch);
+ rs->dma_tx.ch = NULL;
+ }
+ dev_warn(rs->dev, "Failed to request RX DMA channel\n");
+ }
+
+ if (rs->dma_tx.ch && rs->dma_rx.ch) {
+ rs->dma_tx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_TXDR);
+ rs->dma_rx.addr = (dma_addr_t)(mem->start + ROCKCHIP_SPI_RXDR);
+ rs->dma_tx.direction = DMA_MEM_TO_DEV;
+ rs->dma_rx.direction = DMA_DEV_TO_MEM;
+
+ master->can_dma = rockchip_spi_can_dma;
+ master->dma_tx = rs->dma_tx.ch;
+ master->dma_rx = rs->dma_rx.ch;
+ }
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register master\n");
+ goto err_register_master;
+ }
+
+ return 0;
+
+err_register_master:
+ if (rs->dma_tx.ch)
+ dma_release_channel(rs->dma_tx.ch);
+ if (rs->dma_rx.ch)
+ dma_release_channel(rs->dma_rx.ch);
+err_get_fifo_len:
+ clk_disable_unprepare(rs->spiclk);
+err_spiclk_enable:
+ clk_disable_unprepare(rs->apb_pclk);
+err_ioremap_resource:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int rockchip_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ clk_disable_unprepare(rs->spiclk);
+ clk_disable_unprepare(rs->apb_pclk);
+
+ if (rs->dma_tx.ch)
+ dma_release_channel(rs->dma_tx.ch);
+ if (rs->dma_rx.ch)
+ dma_release_channel(rs->dma_rx.ch);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int rockchip_spi_suspend(struct device *dev)
+{
+ int ret = 0;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+ ret = spi_master_suspend(rs->master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ clk_disable_unprepare(rs->spiclk);
+ clk_disable_unprepare(rs->apb_pclk);
+ }
+
+ return ret;
+}
+
+static int rockchip_spi_resume(struct device *dev)
+{
+ int ret = 0;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+ if (!pm_runtime_suspended(dev)) {
+ ret = clk_prepare_enable(rs->apb_pclk);
+ if (ret < 0)
+ return ret;
+
+ ret = clk_prepare_enable(rs->spiclk);
+ if (ret < 0) {
+ clk_disable_unprepare(rs->apb_pclk);
+ return ret;
+ }
+ }
+
+ ret = spi_master_resume(rs->master);
+ if (ret < 0) {
+ clk_disable_unprepare(rs->spiclk);
+ clk_disable_unprepare(rs->apb_pclk);
+ }
+
+ return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int rockchip_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(rs->spiclk);
+ clk_disable_unprepare(rs->apb_pclk);
+
+ return 0;
+}
+
+static int rockchip_spi_runtime_resume(struct device *dev)
+{
+ int ret;
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct rockchip_spi *rs = spi_master_get_devdata(master);
+
+ ret = clk_prepare_enable(rs->apb_pclk);
+ if (ret)
+ return ret;
+
+ ret = clk_prepare_enable(rs->spiclk);
+ if (ret)
+ clk_disable_unprepare(rs->apb_pclk);
+
+ return ret;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops rockchip_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(rockchip_spi_suspend, rockchip_spi_resume)
+ SET_RUNTIME_PM_OPS(rockchip_spi_runtime_suspend,
+ rockchip_spi_runtime_resume, NULL)
+};
+
+static const struct of_device_id rockchip_spi_dt_match[] = {
+ { .compatible = "rockchip,rk3066-spi", },
+ { .compatible = "rockchip,rk3188-spi", },
+ { .compatible = "rockchip,rk3288-spi", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, rockchip_spi_dt_match);
+
+static struct platform_driver rockchip_spi_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &rockchip_spi_pm,
+ .of_match_table = of_match_ptr(rockchip_spi_dt_match),
+ },
+ .probe = rockchip_spi_probe,
+ .remove = rockchip_spi_remove,
+};
+
+module_platform_driver(rockchip_spi_driver);
+
+MODULE_AUTHOR("Addy Ke <addy.ke@rock-chips.com>");
+MODULE_DESCRIPTION("ROCKCHIP SPI Controller Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
new file mode 100644
index 000000000..f6bac9e77
--- /dev/null
+++ b/drivers/spi/spi-rspi.c
@@ -0,0 +1,1326 @@
+/*
+ * SH RSPI driver
+ *
+ * Copyright (C) 2012, 2013 Renesas Solutions Corp.
+ * Copyright (C) 2014 Glider bvba
+ *
+ * Based on spi-sh.c:
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/rspi.h>
+
+#define RSPI_SPCR 0x00 /* Control Register */
+#define RSPI_SSLP 0x01 /* Slave Select Polarity Register */
+#define RSPI_SPPCR 0x02 /* Pin Control Register */
+#define RSPI_SPSR 0x03 /* Status Register */
+#define RSPI_SPDR 0x04 /* Data Register */
+#define RSPI_SPSCR 0x08 /* Sequence Control Register */
+#define RSPI_SPSSR 0x09 /* Sequence Status Register */
+#define RSPI_SPBR 0x0a /* Bit Rate Register */
+#define RSPI_SPDCR 0x0b /* Data Control Register */
+#define RSPI_SPCKD 0x0c /* Clock Delay Register */
+#define RSPI_SSLND 0x0d /* Slave Select Negation Delay Register */
+#define RSPI_SPND 0x0e /* Next-Access Delay Register */
+#define RSPI_SPCR2 0x0f /* Control Register 2 (SH only) */
+#define RSPI_SPCMD0 0x10 /* Command Register 0 */
+#define RSPI_SPCMD1 0x12 /* Command Register 1 */
+#define RSPI_SPCMD2 0x14 /* Command Register 2 */
+#define RSPI_SPCMD3 0x16 /* Command Register 3 */
+#define RSPI_SPCMD4 0x18 /* Command Register 4 */
+#define RSPI_SPCMD5 0x1a /* Command Register 5 */
+#define RSPI_SPCMD6 0x1c /* Command Register 6 */
+#define RSPI_SPCMD7 0x1e /* Command Register 7 */
+#define RSPI_SPCMD(i) (RSPI_SPCMD0 + (i) * 2)
+#define RSPI_NUM_SPCMD 8
+#define RSPI_RZ_NUM_SPCMD 4
+#define QSPI_NUM_SPCMD 4
+
+/* RSPI on RZ only */
+#define RSPI_SPBFCR 0x20 /* Buffer Control Register */
+#define RSPI_SPBFDR 0x22 /* Buffer Data Count Setting Register */
+
+/* QSPI only */
+#define QSPI_SPBFCR 0x18 /* Buffer Control Register */
+#define QSPI_SPBDCR 0x1a /* Buffer Data Count Register */
+#define QSPI_SPBMUL0 0x1c /* Transfer Data Length Multiplier Setting Register 0 */
+#define QSPI_SPBMUL1 0x20 /* Transfer Data Length Multiplier Setting Register 1 */
+#define QSPI_SPBMUL2 0x24 /* Transfer Data Length Multiplier Setting Register 2 */
+#define QSPI_SPBMUL3 0x28 /* Transfer Data Length Multiplier Setting Register 3 */
+#define QSPI_SPBMUL(i) (QSPI_SPBMUL0 + (i) * 4)
+
+/* SPCR - Control Register */
+#define SPCR_SPRIE 0x80 /* Receive Interrupt Enable */
+#define SPCR_SPE 0x40 /* Function Enable */
+#define SPCR_SPTIE 0x20 /* Transmit Interrupt Enable */
+#define SPCR_SPEIE 0x10 /* Error Interrupt Enable */
+#define SPCR_MSTR 0x08 /* Master/Slave Mode Select */
+#define SPCR_MODFEN 0x04 /* Mode Fault Error Detection Enable */
+/* RSPI on SH only */
+#define SPCR_TXMD 0x02 /* TX Only Mode (vs. Full Duplex) */
+#define SPCR_SPMS 0x01 /* 3-wire Mode (vs. 4-wire) */
+/* QSPI on R-Car Gen2 only */
+#define SPCR_WSWAP 0x02 /* Word Swap of read-data for DMAC */
+#define SPCR_BSWAP 0x01 /* Byte Swap of read-data for DMAC */
+
+/* SSLP - Slave Select Polarity Register */
+#define SSLP_SSL1P 0x02 /* SSL1 Signal Polarity Setting */
+#define SSLP_SSL0P 0x01 /* SSL0 Signal Polarity Setting */
+
+/* SPPCR - Pin Control Register */
+#define SPPCR_MOIFE 0x20 /* MOSI Idle Value Fixing Enable */
+#define SPPCR_MOIFV 0x10 /* MOSI Idle Fixed Value */
+#define SPPCR_SPOM 0x04
+#define SPPCR_SPLP2 0x02 /* Loopback Mode 2 (non-inverting) */
+#define SPPCR_SPLP 0x01 /* Loopback Mode (inverting) */
+
+#define SPPCR_IO3FV 0x04 /* Single-/Dual-SPI Mode IO3 Output Fixed Value */
+#define SPPCR_IO2FV 0x04 /* Single-/Dual-SPI Mode IO2 Output Fixed Value */
+
+/* SPSR - Status Register */
+#define SPSR_SPRF 0x80 /* Receive Buffer Full Flag */
+#define SPSR_TEND 0x40 /* Transmit End */
+#define SPSR_SPTEF 0x20 /* Transmit Buffer Empty Flag */
+#define SPSR_PERF 0x08 /* Parity Error Flag */
+#define SPSR_MODF 0x04 /* Mode Fault Error Flag */
+#define SPSR_IDLNF 0x02 /* RSPI Idle Flag */
+#define SPSR_OVRF 0x01 /* Overrun Error Flag (RSPI only) */
+
+/* SPSCR - Sequence Control Register */
+#define SPSCR_SPSLN_MASK 0x07 /* Sequence Length Specification */
+
+/* SPSSR - Sequence Status Register */
+#define SPSSR_SPECM_MASK 0x70 /* Command Error Mask */
+#define SPSSR_SPCP_MASK 0x07 /* Command Pointer Mask */
+
+/* SPDCR - Data Control Register */
+#define SPDCR_TXDMY 0x80 /* Dummy Data Transmission Enable */
+#define SPDCR_SPLW1 0x40 /* Access Width Specification (RZ) */
+#define SPDCR_SPLW0 0x20 /* Access Width Specification (RZ) */
+#define SPDCR_SPLLWORD (SPDCR_SPLW1 | SPDCR_SPLW0)
+#define SPDCR_SPLWORD SPDCR_SPLW1
+#define SPDCR_SPLBYTE SPDCR_SPLW0
+#define SPDCR_SPLW 0x20 /* Access Width Specification (SH) */
+#define SPDCR_SPRDTD 0x10 /* Receive Transmit Data Select (SH) */
+#define SPDCR_SLSEL1 0x08
+#define SPDCR_SLSEL0 0x04
+#define SPDCR_SLSEL_MASK 0x0c /* SSL1 Output Select (SH) */
+#define SPDCR_SPFC1 0x02
+#define SPDCR_SPFC0 0x01
+#define SPDCR_SPFC_MASK 0x03 /* Frame Count Setting (1-4) (SH) */
+
+/* SPCKD - Clock Delay Register */
+#define SPCKD_SCKDL_MASK 0x07 /* Clock Delay Setting (1-8) */
+
+/* SSLND - Slave Select Negation Delay Register */
+#define SSLND_SLNDL_MASK 0x07 /* SSL Negation Delay Setting (1-8) */
+
+/* SPND - Next-Access Delay Register */
+#define SPND_SPNDL_MASK 0x07 /* Next-Access Delay Setting (1-8) */
+
+/* SPCR2 - Control Register 2 */
+#define SPCR2_PTE 0x08 /* Parity Self-Test Enable */
+#define SPCR2_SPIE 0x04 /* Idle Interrupt Enable */
+#define SPCR2_SPOE 0x02 /* Odd Parity Enable (vs. Even) */
+#define SPCR2_SPPE 0x01 /* Parity Enable */
+
+/* SPCMDn - Command Registers */
+#define SPCMD_SCKDEN 0x8000 /* Clock Delay Setting Enable */
+#define SPCMD_SLNDEN 0x4000 /* SSL Negation Delay Setting Enable */
+#define SPCMD_SPNDEN 0x2000 /* Next-Access Delay Enable */
+#define SPCMD_LSBF 0x1000 /* LSB First */
+#define SPCMD_SPB_MASK 0x0f00 /* Data Length Setting */
+#define SPCMD_SPB_8_TO_16(bit)	((((bit) - 1) << 8) & SPCMD_SPB_MASK)
+#define SPCMD_SPB_8BIT 0x0000 /* QSPI only */
+#define SPCMD_SPB_16BIT 0x0100
+#define SPCMD_SPB_20BIT 0x0000
+#define SPCMD_SPB_24BIT 0x0100
+#define SPCMD_SPB_32BIT 0x0200
+#define SPCMD_SSLKP 0x0080 /* SSL Signal Level Keeping */
+#define SPCMD_SPIMOD_MASK 0x0060 /* SPI Operating Mode (QSPI only) */
+#define SPCMD_SPIMOD1 0x0040
+#define SPCMD_SPIMOD0 0x0020
+#define SPCMD_SPIMOD_SINGLE 0
+#define SPCMD_SPIMOD_DUAL SPCMD_SPIMOD0
+#define SPCMD_SPIMOD_QUAD SPCMD_SPIMOD1
+#define SPCMD_SPRW 0x0010 /* SPI Read/Write Access (Dual/Quad) */
+#define SPCMD_SSLA_MASK 0x0030 /* SSL Assert Signal Setting (RSPI) */
+#define SPCMD_BRDV_MASK 0x000c /* Bit Rate Division Setting */
+#define SPCMD_CPOL 0x0002 /* Clock Polarity Setting */
+#define SPCMD_CPHA 0x0001 /* Clock Phase Setting */
+
+/* SPBFCR - Buffer Control Register */
+#define SPBFCR_TXRST 0x80 /* Transmit Buffer Data Reset */
+#define SPBFCR_RXRST 0x40 /* Receive Buffer Data Reset */
+#define SPBFCR_TXTRG_MASK 0x30 /* Transmit Buffer Data Triggering Number */
+#define SPBFCR_RXTRG_MASK 0x07 /* Receive Buffer Data Triggering Number */
+/* QSPI on R-Car Gen2 */
+#define SPBFCR_TXTRG_1B 0x00 /* 31 bytes (1 byte available) */
+#define SPBFCR_TXTRG_32B 0x30 /* 0 byte (32 bytes available) */
+#define SPBFCR_RXTRG_1B 0x00 /* 1 byte (31 bytes available) */
+#define SPBFCR_RXTRG_32B 0x07 /* 32 bytes (0 byte available) */
+
+#define QSPI_BUFFER_SIZE 32u
+
+struct rspi_data {
+ void __iomem *addr;
+ u32 max_speed_hz;
+ struct spi_master *master;
+ wait_queue_head_t wait;
+ struct clk *clk;
+ u16 spcmd;
+ u8 spsr;
+ u8 sppcr;
+ int rx_irq, tx_irq;
+ const struct spi_ops *ops;
+
+ unsigned dma_callbacked:1;
+ unsigned byte_access:1;
+};
+
+static void rspi_write8(const struct rspi_data *rspi, u8 data, u16 offset)
+{
+ iowrite8(data, rspi->addr + offset);
+}
+
+static void rspi_write16(const struct rspi_data *rspi, u16 data, u16 offset)
+{
+ iowrite16(data, rspi->addr + offset);
+}
+
+static void rspi_write32(const struct rspi_data *rspi, u32 data, u16 offset)
+{
+ iowrite32(data, rspi->addr + offset);
+}
+
+static u8 rspi_read8(const struct rspi_data *rspi, u16 offset)
+{
+ return ioread8(rspi->addr + offset);
+}
+
+static u16 rspi_read16(const struct rspi_data *rspi, u16 offset)
+{
+ return ioread16(rspi->addr + offset);
+}
+
+static void rspi_write_data(const struct rspi_data *rspi, u16 data)
+{
+ if (rspi->byte_access)
+ rspi_write8(rspi, data, RSPI_SPDR);
+ else /* 16 bit */
+ rspi_write16(rspi, data, RSPI_SPDR);
+}
+
+static u16 rspi_read_data(const struct rspi_data *rspi)
+{
+ if (rspi->byte_access)
+ return rspi_read8(rspi, RSPI_SPDR);
+ else /* 16 bit */
+ return rspi_read16(rspi, RSPI_SPDR);
+}
+
+/* per-variant operations and capabilities */
+struct spi_ops {
+ int (*set_config_register)(struct rspi_data *rspi, int access_size);
+ int (*transfer_one)(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer);
+ u16 mode_bits;
+ u16 flags;
+ u16 fifo_size;
+};
+
+/*
+ * functions for RSPI on legacy SH
+ */
+static int rspi_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ int spbr;
+
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
+
+ /* Sets transfer bit rate: rate = pclk / (2 * (SPBR + 1)) */
+ spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
+ 2 * rspi->max_speed_hz) - 1;
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+
+ /* Disable dummy transmission, set 16-bit word access, 1 frame */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 0;
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Sets parity, interrupt mask */
+ rspi_write8(rspi, 0x00, RSPI_SPCR2);
+
+ /* Sets SPCMD */
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+/*
+ * functions for RSPI on RZ
+ */
+static int rspi_rz_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ int spbr;
+
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
+
+ /* Sets transfer bit rate: rate = pclk / (2 * (SPBR + 1)) */
+ spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk),
+ 2 * rspi->max_speed_hz) - 1;
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, SPDCR_SPLBYTE, RSPI_SPDCR);
+ rspi->byte_access = 1;
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Sets SPCMD */
+ rspi->spcmd |= SPCMD_SPB_8_TO_16(access_size);
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Sets RSPI mode */
+ rspi_write8(rspi, SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+/*
+ * functions for QSPI
+ */
+static int qspi_set_config_register(struct rspi_data *rspi, int access_size)
+{
+ int spbr;
+
+ /* Sets output mode, MOSI signal, and (optionally) loopback */
+ rspi_write8(rspi, rspi->sppcr, RSPI_SPPCR);
+
+ /* Sets transfer bit rate */
+ spbr = DIV_ROUND_UP(clk_get_rate(rspi->clk), 2 * rspi->max_speed_hz);
+ rspi_write8(rspi, clamp(spbr, 0, 255), RSPI_SPBR);
+
+ /* Disable dummy transmission, set byte access */
+ rspi_write8(rspi, 0, RSPI_SPDCR);
+ rspi->byte_access = 1;
+
+ /* Sets RSPCK, SSL, next-access delay value */
+ rspi_write8(rspi, 0x00, RSPI_SPCKD);
+ rspi_write8(rspi, 0x00, RSPI_SSLND);
+ rspi_write8(rspi, 0x00, RSPI_SPND);
+
+ /* Data Length Setting */
+ if (access_size == 8)
+ rspi->spcmd |= SPCMD_SPB_8BIT;
+ else if (access_size == 16)
+ rspi->spcmd |= SPCMD_SPB_16BIT;
+ else
+ rspi->spcmd |= SPCMD_SPB_32BIT;
+
+ rspi->spcmd |= SPCMD_SCKDEN | SPCMD_SLNDEN | SPCMD_SPNDEN;
+
+ /* Resets transfer data length */
+ rspi_write32(rspi, 0, QSPI_SPBMUL0);
+
+ /* Resets transmit and receive buffer */
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
+ /* Sets buffer to allow normal operation */
+ rspi_write8(rspi, 0x00, QSPI_SPBFCR);
+
+ /* Sets SPCMD */
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+
+ /* Enables SPI function in master mode */
+ rspi_write8(rspi, SPCR_SPE | SPCR_MSTR, RSPI_SPCR);
+
+ return 0;
+}
+
+static void qspi_update(const struct rspi_data *rspi, u8 mask, u8 val, u8 reg)
+{
+ u8 data;
+
+ data = rspi_read8(rspi, reg);
+ data &= ~mask;
+ data |= (val & mask);
+ rspi_write8(rspi, data, reg);
+}
+
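+/*
+ * The TXTRG/RXTRG fields select how full or empty the 32-byte buffer must
+ * be before SPTEF/SPRF fires: whole 32-byte bursts are used while at
+ * least a buffer's worth of data remains, and the final short tail drops
+ * back to single-byte triggering.
+ */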
+static int qspi_set_send_trigger(struct rspi_data *rspi, unsigned int len)
+{
+ unsigned int n;
+
+ n = min(len, QSPI_BUFFER_SIZE);
+
+ if (len >= QSPI_BUFFER_SIZE) {
+ /* sets triggering number to 32 bytes */
+ qspi_update(rspi, SPBFCR_TXTRG_MASK,
+ SPBFCR_TXTRG_32B, QSPI_SPBFCR);
+ } else {
+ /* sets triggering number to 1 byte */
+ qspi_update(rspi, SPBFCR_TXTRG_MASK,
+ SPBFCR_TXTRG_1B, QSPI_SPBFCR);
+ }
+
+ return n;
+}
+
+static void qspi_set_receive_trigger(struct rspi_data *rspi, unsigned int len)
+{
+ unsigned int n;
+
+ n = min(len, QSPI_BUFFER_SIZE);
+
+ if (len >= QSPI_BUFFER_SIZE) {
+ /* sets triggering number to 32 bytes */
+ qspi_update(rspi, SPBFCR_RXTRG_MASK,
+ SPBFCR_RXTRG_32B, QSPI_SPBFCR);
+ } else {
+ /* sets triggering number to 1 byte */
+ qspi_update(rspi, SPBFCR_RXTRG_MASK,
+ SPBFCR_RXTRG_1B, QSPI_SPBFCR);
+ }
+}
+
+#define set_config_register(spi, n) spi->ops->set_config_register(spi, n)
+
+static void rspi_enable_irq(const struct rspi_data *rspi, u8 enable)
+{
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | enable, RSPI_SPCR);
+}
+
+static void rspi_disable_irq(const struct rspi_data *rspi, u8 disable)
+{
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~disable, RSPI_SPCR);
+}
+
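+/*
+ * Check the status register first so we never sleep on an event that has
+ * already happened; otherwise unmask the interrupt and wait, for up to
+ * one second, until the IRQ handler latches SPSR into rspi->spsr,
+ * re-masks the source and wakes us.
+ */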
+static int rspi_wait_for_interrupt(struct rspi_data *rspi, u8 wait_mask,
+ u8 enable_bit)
+{
+ int ret;
+
+ rspi->spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (rspi->spsr & wait_mask)
+ return 0;
+
+ rspi_enable_irq(rspi, enable_bit);
+ ret = wait_event_timeout(rspi->wait, rspi->spsr & wait_mask, HZ);
+ if (ret == 0 && !(rspi->spsr & wait_mask))
+ return -ETIMEDOUT;
+
+ return 0;
+}
+
+static inline int rspi_wait_for_tx_empty(struct rspi_data *rspi)
+{
+ return rspi_wait_for_interrupt(rspi, SPSR_SPTEF, SPCR_SPTIE);
+}
+
+static inline int rspi_wait_for_rx_full(struct rspi_data *rspi)
+{
+ return rspi_wait_for_interrupt(rspi, SPSR_SPRF, SPCR_SPRIE);
+}
+
+static int rspi_data_out(struct rspi_data *rspi, u8 data)
+{
+ int error = rspi_wait_for_tx_empty(rspi);
+ if (error < 0) {
+ dev_err(&rspi->master->dev, "transmit timeout\n");
+ return error;
+ }
+ rspi_write_data(rspi, data);
+ return 0;
+}
+
+static int rspi_data_in(struct rspi_data *rspi)
+{
+ int error;
+ u8 data;
+
+ error = rspi_wait_for_rx_full(rspi);
+ if (error < 0) {
+ dev_err(&rspi->master->dev, "receive timeout\n");
+ return error;
+ }
+ data = rspi_read_data(rspi);
+ return data;
+}
+
+static int rspi_pio_transfer(struct rspi_data *rspi, const u8 *tx, u8 *rx,
+ unsigned int n)
+{
+ while (n-- > 0) {
+ if (tx) {
+ int ret = rspi_data_out(rspi, *tx++);
+ if (ret < 0)
+ return ret;
+ }
+ if (rx) {
+ int ret = rspi_data_in(rspi);
+ if (ret < 0)
+ return ret;
+ *rx++ = ret;
+ }
+ }
+
+ return 0;
+}
+
+static void rspi_dma_complete(void *arg)
+{
+ struct rspi_data *rspi = arg;
+
+ rspi->dma_callbacked = 1;
+ wake_up_interruptible(&rspi->wait);
+}
+
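+/*
+ * Returns 0 on success, -EAGAIN if no DMA descriptor could be prepared
+ * (callers then fall back to PIO), or another negative error code on
+ * submission failure or timeout.
+ */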
+static int rspi_dma_transfer(struct rspi_data *rspi, struct sg_table *tx,
+ struct sg_table *rx)
+{
+ struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+ u8 irq_mask = 0;
+ unsigned int other_irq = 0;
+ dma_cookie_t cookie;
+ int ret;
+
+ /* First prepare and submit the DMA request(s), as this may fail */
+ if (rx) {
+ desc_rx = dmaengine_prep_slave_sg(rspi->master->dma_rx,
+ rx->sgl, rx->nents, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx) {
+ ret = -EAGAIN;
+ goto no_dma_rx;
+ }
+
+ desc_rx->callback = rspi_dma_complete;
+ desc_rx->callback_param = rspi;
+ cookie = dmaengine_submit(desc_rx);
+ if (dma_submit_error(cookie)) {
+ ret = cookie;
+ goto no_dma_rx;
+ }
+
+ irq_mask |= SPCR_SPRIE;
+ }
+
+ if (tx) {
+ desc_tx = dmaengine_prep_slave_sg(rspi->master->dma_tx,
+ tx->sgl, tx->nents, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ ret = -EAGAIN;
+ goto no_dma_tx;
+ }
+
+ if (rx) {
+ /* No callback */
+ desc_tx->callback = NULL;
+ } else {
+ desc_tx->callback = rspi_dma_complete;
+ desc_tx->callback_param = rspi;
+ }
+ cookie = dmaengine_submit(desc_tx);
+ if (dma_submit_error(cookie)) {
+ ret = cookie;
+ goto no_dma_tx;
+ }
+
+ irq_mask |= SPCR_SPTIE;
+ }
+
+ /*
+ * The DMAC needs SPxIE set, but with SPxIE set the CPU's IRQ routine
+ * would run as well, so this driver keeps the interrupt lines disabled
+ * at the interrupt controller for the duration of the DMA transfer.
+ */
+ if (tx)
+ disable_irq(other_irq = rspi->tx_irq);
+ if (rx && rspi->rx_irq != other_irq)
+ disable_irq(rspi->rx_irq);
+
+ rspi_enable_irq(rspi, irq_mask);
+ rspi->dma_callbacked = 0;
+
+ /* Now start DMA */
+ if (rx)
+ dma_async_issue_pending(rspi->master->dma_rx);
+ if (tx)
+ dma_async_issue_pending(rspi->master->dma_tx);
+
+ ret = wait_event_interruptible_timeout(rspi->wait,
+ rspi->dma_callbacked, HZ);
+ if (ret > 0 && rspi->dma_callbacked)
+ ret = 0;
+ else if (!ret) {
+ dev_err(&rspi->master->dev, "DMA timeout\n");
+ ret = -ETIMEDOUT;
+ if (tx)
+ dmaengine_terminate_all(rspi->master->dma_tx);
+ if (rx)
+ dmaengine_terminate_all(rspi->master->dma_rx);
+ }
+
+ rspi_disable_irq(rspi, irq_mask);
+
+ if (tx)
+ enable_irq(rspi->tx_irq);
+ if (rx && rspi->rx_irq != other_irq)
+ enable_irq(rspi->rx_irq);
+
+ return ret;
+
+no_dma_tx:
+ if (rx)
+ dmaengine_terminate_all(rspi->master->dma_rx);
+no_dma_rx:
+ if (ret == -EAGAIN) {
+ pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
+ dev_driver_string(&rspi->master->dev),
+ dev_name(&rspi->master->dev));
+ }
+ return ret;
+}
+
+static void rspi_receive_init(const struct rspi_data *rspi)
+{
+ u8 spsr;
+
+ spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF)
+ rspi_read_data(rspi); /* dummy read */
+ if (spsr & SPSR_OVRF)
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPSR) & ~SPSR_OVRF,
+ RSPI_SPSR);
+}
+
+static void rspi_rz_receive_init(const struct rspi_data *rspi)
+{
+ rspi_receive_init(rspi);
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, RSPI_SPBFCR);
+ rspi_write8(rspi, 0, RSPI_SPBFCR);
+}
+
+static void qspi_receive_init(const struct rspi_data *rspi)
+{
+ u8 spsr;
+
+ spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF)
+ rspi_read_data(rspi); /* dummy read */
+ rspi_write8(rspi, SPBFCR_TXRST | SPBFCR_RXRST, QSPI_SPBFCR);
+ rspi_write8(rspi, 0, QSPI_SPBFCR);
+}
+
+static bool __rspi_can_dma(const struct rspi_data *rspi,
+ const struct spi_transfer *xfer)
+{
+ return xfer->len > rspi->ops->fifo_size;
+}
+
+static bool rspi_can_dma(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ return __rspi_can_dma(rspi, xfer);
+}
+
+static int rspi_dma_check_then_transfer(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+ /* rx_buf can be NULL on RSPI on SH in TX-only Mode */
+ int ret = rspi_dma_transfer(rspi, &xfer->tx_sg,
+ xfer->rx_buf ? &xfer->rx_sg : NULL);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
+ return -EAGAIN;
+}
+
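+/*
+ * Common transfer path: try DMA first, fall back to PIO when no DMA
+ * descriptor could be set up, and finally wait for the transmit buffer
+ * to drain after the last write.
+ */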
+static int rspi_common_transfer(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ int ret;
+
+ ret = rspi_dma_check_then_transfer(rspi, xfer);
+ if (ret != -EAGAIN)
+ return ret;
+
+ ret = rspi_pio_transfer(rspi, xfer->tx_buf, xfer->rx_buf, xfer->len);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the last transmission */
+ rspi_wait_for_tx_empty(rspi);
+
+ return 0;
+}
+
+static int rspi_transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+ u8 spcr;
+
+ spcr = rspi_read8(rspi, RSPI_SPCR);
+ if (xfer->rx_buf) {
+ rspi_receive_init(rspi);
+ spcr &= ~SPCR_TXMD;
+ } else {
+ spcr |= SPCR_TXMD;
+ }
+ rspi_write8(rspi, spcr, RSPI_SPCR);
+
+ return rspi_common_transfer(rspi, xfer);
+}
+
+static int rspi_rz_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ rspi_rz_receive_init(rspi);
+
+ return rspi_common_transfer(rspi, xfer);
+}
+
+static int qspi_trigger_transfer_out_int(struct rspi_data *rspi, const u8 *tx,
+ u8 *rx, unsigned int len)
+{
+ int i, n, ret;
+ int error;
+
+ while (len > 0) {
+ n = qspi_set_send_trigger(rspi, len);
+ qspi_set_receive_trigger(rspi, len);
+ if (n == QSPI_BUFFER_SIZE) {
+ error = rspi_wait_for_tx_empty(rspi);
+ if (error < 0) {
+ dev_err(&rspi->master->dev, "transmit timeout\n");
+ return error;
+ }
+ for (i = 0; i < n; i++)
+ rspi_write_data(rspi, *tx++);
+
+ error = rspi_wait_for_rx_full(rspi);
+ if (error < 0) {
+ dev_err(&rspi->master->dev, "receive timeout\n");
+ return error;
+ }
+ for (i = 0; i < n; i++)
+ *rx++ = rspi_read_data(rspi);
+ } else {
+ ret = rspi_pio_transfer(rspi, tx, rx, n);
+ if (ret < 0)
+ return ret;
+ }
+ len -= n;
+ }
+
+ return 0;
+}
+
+static int qspi_transfer_out_in(struct rspi_data *rspi,
+ struct spi_transfer *xfer)
+{
+ int ret;
+
+ qspi_receive_init(rspi);
+
+ ret = rspi_dma_check_then_transfer(rspi, xfer);
+ if (ret != -EAGAIN)
+ return ret;
+
+ ret = qspi_trigger_transfer_out_int(rspi, xfer->tx_buf,
+ xfer->rx_buf, xfer->len);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int qspi_transfer_out(struct rspi_data *rspi, struct spi_transfer *xfer)
+{
+ int ret;
+
+ if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+ ret = rspi_dma_transfer(rspi, &xfer->tx_sg, NULL);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
+ ret = rspi_pio_transfer(rspi, xfer->tx_buf, NULL, xfer->len);
+ if (ret < 0)
+ return ret;
+
+ /* Wait for the last transmission */
+ rspi_wait_for_tx_empty(rspi);
+
+ return 0;
+}
+
+static int qspi_transfer_in(struct rspi_data *rspi, struct spi_transfer *xfer)
+{
+ if (rspi->master->can_dma && __rspi_can_dma(rspi, xfer)) {
+ int ret = rspi_dma_transfer(rspi, NULL, &xfer->rx_sg);
+ if (ret != -EAGAIN)
+ return ret;
+ }
+
+ return rspi_pio_transfer(rspi, NULL, xfer->rx_buf, xfer->len);
+}
+
+static int qspi_transfer_one(struct spi_master *master, struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ if (spi->mode & SPI_LOOP) {
+ return qspi_transfer_out_in(rspi, xfer);
+ } else if (xfer->tx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Write */
+ return qspi_transfer_out(rspi, xfer);
+ } else if (xfer->rx_nbits > SPI_NBITS_SINGLE) {
+ /* Quad or Dual SPI Read */
+ return qspi_transfer_in(rspi, xfer);
+ } else {
+ /* Single SPI Transfer */
+ return qspi_transfer_out_in(rspi, xfer);
+ }
+}
+
+static int rspi_setup(struct spi_device *spi)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(spi->master);
+
+ rspi->max_speed_hz = spi->max_speed_hz;
+
+ rspi->spcmd = SPCMD_SSLKP;
+ if (spi->mode & SPI_CPOL)
+ rspi->spcmd |= SPCMD_CPOL;
+ if (spi->mode & SPI_CPHA)
+ rspi->spcmd |= SPCMD_CPHA;
+
+ /* CMOS output mode and MOSI signal from previous transfer */
+ rspi->sppcr = 0;
+ if (spi->mode & SPI_LOOP)
+ rspi->sppcr |= SPPCR_SPLP;
+
+ set_config_register(rspi, 8);
+
+ return 0;
+}
+
+static u16 qspi_transfer_mode(const struct spi_transfer *xfer)
+{
+ if (xfer->tx_buf)
+ switch (xfer->tx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL;
+ default:
+ return 0;
+ }
+ if (xfer->rx_buf)
+ switch (xfer->rx_nbits) {
+ case SPI_NBITS_QUAD:
+ return SPCMD_SPIMOD_QUAD | SPCMD_SPRW;
+ case SPI_NBITS_DUAL:
+ return SPCMD_SPIMOD_DUAL | SPCMD_SPRW;
+ default:
+ return 0;
+ }
+
+ return 0;
+}
+
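+/*
+ * Program the QSPI sequencer so that consecutive transfers sharing the
+ * same Single/Dual/Quad mode are merged into one SPCMD slot, with each
+ * slot's byte count kept in the matching SPBMUL register; messages that
+ * need more distinct modes than there are SPCMD slots are rejected.
+ */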
+static int qspi_setup_sequencer(struct rspi_data *rspi,
+ const struct spi_message *msg)
+{
+ const struct spi_transfer *xfer;
+ unsigned int i = 0, len = 0;
+ u16 current_mode = 0xffff, mode;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ mode = qspi_transfer_mode(xfer);
+ if (mode == current_mode) {
+ len += xfer->len;
+ continue;
+ }
+
+ /* Transfer mode change */
+ if (i) {
+ /* Set transfer data length of previous transfer */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ }
+
+ if (i >= QSPI_NUM_SPCMD) {
+ dev_err(&msg->spi->dev,
+ "Too many different transfer modes");
+ return -EINVAL;
+ }
+
+ /* Program transfer mode for this transfer */
+ rspi_write16(rspi, rspi->spcmd | mode, RSPI_SPCMD(i));
+ current_mode = mode;
+ len = xfer->len;
+ i++;
+ }
+ if (i) {
+ /* Set final transfer data length and sequence length */
+ rspi_write32(rspi, len, QSPI_SPBMUL(i - 1));
+ rspi_write8(rspi, i - 1, RSPI_SPSCR);
+ }
+
+ return 0;
+}
+
+static int rspi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+ int ret;
+
+ if (msg->spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)) {
+ /* Setup sequencer for messages with multiple transfer modes */
+ ret = qspi_setup_sequencer(rspi, msg);
+ if (ret < 0)
+ return ret;
+ }
+
+ /* Enable SPI function in master mode */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) | SPCR_SPE, RSPI_SPCR);
+ return 0;
+}
+
+static int rspi_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct rspi_data *rspi = spi_master_get_devdata(master);
+
+ /* Disable SPI function */
+ rspi_write8(rspi, rspi_read8(rspi, RSPI_SPCR) & ~SPCR_SPE, RSPI_SPCR);
+
+ /* Reset sequencer for Single SPI Transfers */
+ rspi_write16(rspi, rspi->spcmd, RSPI_SPCMD0);
+ rspi_write8(rspi, 0, RSPI_SPSCR);
+ return 0;
+}
+
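+/*
+ * Interrupt handling: some variants mux SPRI and SPTI onto a single line
+ * (rspi_irq_mux), others wire them up separately (rspi_irq_rx and
+ * rspi_irq_tx). Each handler latches SPSR into rspi->spsr, masks the
+ * source and wakes the waiter in rspi_wait_for_interrupt().
+ */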
+static irqreturn_t rspi_irq_mux(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+ irqreturn_t ret = IRQ_NONE;
+ u8 disable_irq = 0;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF)
+ disable_irq |= SPCR_SPRIE;
+ if (spsr & SPSR_SPTEF)
+ disable_irq |= SPCR_SPTIE;
+
+ if (disable_irq) {
+ ret = IRQ_HANDLED;
+ rspi_disable_irq(rspi, disable_irq);
+ wake_up(&rspi->wait);
+ }
+
+ return ret;
+}
+
+static irqreturn_t rspi_irq_rx(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPRF) {
+ rspi_disable_irq(rspi, SPCR_SPRIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static irqreturn_t rspi_irq_tx(int irq, void *_sr)
+{
+ struct rspi_data *rspi = _sr;
+ u8 spsr;
+
+ rspi->spsr = spsr = rspi_read8(rspi, RSPI_SPSR);
+ if (spsr & SPSR_SPTEF) {
+ rspi_disable_irq(rspi, SPCR_SPTIE);
+ wake_up(&rspi->wait);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static struct dma_chan *rspi_request_dma_chan(struct device *dev,
+ enum dma_transfer_direction dir,
+ unsigned int id,
+ dma_addr_t port_addr)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ (void *)(unsigned long)id, dev,
+ dir == DMA_MEM_TO_DEV ? "tx" : "rx");
+ if (!chan) {
+ dev_warn(dev, "dma_request_slave_channel_compat failed\n");
+ return NULL;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = dir;
+ if (dir == DMA_MEM_TO_DEV) {
+ cfg.dst_addr = port_addr;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ } else {
+ cfg.src_addr = port_addr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
+ dma_release_channel(chan);
+ return NULL;
+ }
+
+ return chan;
+}
+
+static int rspi_request_dma(struct device *dev, struct spi_master *master,
+ const struct resource *res)
+{
+ const struct rspi_plat_data *rspi_pd = dev_get_platdata(dev);
+ unsigned int dma_tx_id, dma_rx_id;
+
+ if (dev->of_node) {
+ /* In the OF case we will get the slave IDs from the DT */
+ dma_tx_id = 0;
+ dma_rx_id = 0;
+ } else if (rspi_pd && rspi_pd->dma_tx_id && rspi_pd->dma_rx_id) {
+ dma_tx_id = rspi_pd->dma_tx_id;
+ dma_rx_id = rspi_pd->dma_rx_id;
+ } else {
+ /* Neither DT nor platform data provide DMA channels; run without DMA */
+ return 0;
+ }
+
+ master->dma_tx = rspi_request_dma_chan(dev, DMA_MEM_TO_DEV, dma_tx_id,
+ res->start + RSPI_SPDR);
+ if (!master->dma_tx)
+ return -ENODEV;
+
+ master->dma_rx = rspi_request_dma_chan(dev, DMA_DEV_TO_MEM, dma_rx_id,
+ res->start + RSPI_SPDR);
+ if (!master->dma_rx) {
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ return -ENODEV;
+ }
+
+ master->can_dma = rspi_can_dma;
+ dev_info(dev, "DMA available\n");
+ return 0;
+}
+
+static void rspi_release_dma(struct spi_master *master)
+{
+ if (master->dma_tx)
+ dma_release_channel(master->dma_tx);
+ if (master->dma_rx)
+ dma_release_channel(master->dma_rx);
+}
+
+static int rspi_remove(struct platform_device *pdev)
+{
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ rspi_release_dma(rspi->master);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct spi_ops rspi_ops = {
+ .set_config_register = rspi_set_config_register,
+ .transfer_one = rspi_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+ .flags = SPI_MASTER_MUST_TX,
+ .fifo_size = 8,
+};
+
+static const struct spi_ops rspi_rz_ops = {
+ .set_config_register = rspi_rz_set_config_register,
+ .transfer_one = rspi_rz_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP,
+ .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+ .fifo_size = 8, /* 8 for TX, 32 for RX */
+};
+
+static const struct spi_ops qspi_ops = {
+ .set_config_register = qspi_set_config_register,
+ .transfer_one = qspi_transfer_one,
+ .mode_bits = SPI_CPHA | SPI_CPOL | SPI_LOOP |
+ SPI_TX_DUAL | SPI_TX_QUAD |
+ SPI_RX_DUAL | SPI_RX_QUAD,
+ .flags = SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX,
+ .fifo_size = 32,
+};
+
+#ifdef CONFIG_OF
+static const struct of_device_id rspi_of_match[] = {
+ /* RSPI on legacy SH */
+ { .compatible = "renesas,rspi", .data = &rspi_ops },
+ /* RSPI on RZ/A1H */
+ { .compatible = "renesas,rspi-rz", .data = &rspi_rz_ops },
+ /* QSPI on R-Car Gen2 */
+ { .compatible = "renesas,qspi", .data = &qspi_ops },
+ { /* sentinel */ }
+};
+
+MODULE_DEVICE_TABLE(of, rspi_of_match);
+
+static int rspi_parse_dt(struct device *dev, struct spi_master *master)
+{
+ u32 num_cs;
+ int error;
+
+ /* Parse DT properties */
+ error = of_property_read_u32(dev->of_node, "num-cs", &num_cs);
+ if (error) {
+ dev_err(dev, "of_property_read_u32 num-cs failed %d\n", error);
+ return error;
+ }
+
+ master->num_chipselect = num_cs;
+ return 0;
+}
+#else
+#define rspi_of_match NULL
+static inline int rspi_parse_dt(struct device *dev, struct spi_master *master)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_OF */
+
+static int rspi_request_irq(struct device *dev, unsigned int irq,
+ irq_handler_t handler, const char *suffix,
+ void *dev_id)
+{
+ const char *name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s",
+ dev_name(dev), suffix);
+ if (!name)
+ return -ENOMEM;
+
+ return devm_request_irq(dev, irq, handler, 0, name, dev_id);
+}
+
+static int rspi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct spi_master *master;
+ struct rspi_data *rspi;
+ int ret;
+ const struct of_device_id *of_id;
+ const struct rspi_plat_data *rspi_pd;
+ const struct spi_ops *ops;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct rspi_data));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ return -ENOMEM;
+ }
+
+ of_id = of_match_device(rspi_of_match, &pdev->dev);
+ if (of_id) {
+ ops = of_id->data;
+ ret = rspi_parse_dt(&pdev->dev, master);
+ if (ret)
+ goto error1;
+ } else {
+ ops = (struct spi_ops *)pdev->id_entry->driver_data;
+ rspi_pd = dev_get_platdata(&pdev->dev);
+ if (rspi_pd && rspi_pd->num_chipselect)
+ master->num_chipselect = rspi_pd->num_chipselect;
+ else
+ master->num_chipselect = 2; /* default */
+ }
+
+ /* ops parameter check */
+ if (!ops->set_config_register) {
+ dev_err(&pdev->dev, "there is no set_config_register\n");
+ ret = -ENODEV;
+ goto error1;
+ }
+
+ rspi = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, rspi);
+ rspi->ops = ops;
+ rspi->master = master;
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ rspi->addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(rspi->addr)) {
+ ret = PTR_ERR(rspi->addr);
+ goto error1;
+ }
+
+ rspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(rspi->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(rspi->clk);
+ goto error1;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ init_waitqueue_head(&rspi->wait);
+
+ master->bus_num = pdev->id;
+ master->setup = rspi_setup;
+ master->auto_runtime_pm = true;
+ master->transfer_one = ops->transfer_one;
+ master->prepare_message = rspi_prepare_message;
+ master->unprepare_message = rspi_unprepare_message;
+ master->mode_bits = ops->mode_bits;
+ master->flags = ops->flags;
+ master->dev.of_node = pdev->dev.of_node;
+
+ ret = platform_get_irq_byname(pdev, "rx");
+ if (ret < 0) {
+ ret = platform_get_irq_byname(pdev, "mux");
+ if (ret < 0)
+ ret = platform_get_irq(pdev, 0);
+ if (ret >= 0)
+ rspi->rx_irq = rspi->tx_irq = ret;
+ } else {
+ rspi->rx_irq = ret;
+ ret = platform_get_irq_byname(pdev, "tx");
+ if (ret >= 0)
+ rspi->tx_irq = ret;
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "platform_get_irq error\n");
+ goto error2;
+ }
+
+ if (rspi->rx_irq == rspi->tx_irq) {
+ /* Single multiplexed interrupt */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_mux,
+ "mux", rspi);
+ } else {
+ /* Multi-interrupt mode, only SPRI and SPTI are used */
+ ret = rspi_request_irq(&pdev->dev, rspi->rx_irq, rspi_irq_rx,
+ "rx", rspi);
+ if (!ret)
+ ret = rspi_request_irq(&pdev->dev, rspi->tx_irq,
+ rspi_irq_tx, "tx", rspi);
+ }
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq error\n");
+ goto error2;
+ }
+
+ ret = rspi_request_dma(&pdev->dev, master, res);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "DMA not available, using PIO\n");
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master error.\n");
+ goto error3;
+ }
+
+ dev_info(&pdev->dev, "probed\n");
+
+ return 0;
+
+error3:
+ rspi_release_dma(master);
+error2:
+ pm_runtime_disable(&pdev->dev);
+error1:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static struct platform_device_id spi_driver_ids[] = {
+ { "rspi", (kernel_ulong_t)&rspi_ops },
+ { "rspi-rz", (kernel_ulong_t)&rspi_rz_ops },
+ { "qspi", (kernel_ulong_t)&qspi_ops },
+ {},
+};
+
+MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+
+static struct platform_driver rspi_driver = {
+ .probe = rspi_probe,
+ .remove = rspi_remove,
+ .id_table = spi_driver_ids,
+ .driver = {
+ .name = "renesas_spi",
+ .of_match_table = of_match_ptr(rspi_of_match),
+ },
+};
+module_platform_driver(rspi_driver);
+
+MODULE_DESCRIPTION("Renesas RSPI bus driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_ALIAS("platform:rspi");
diff --git a/drivers/spi/spi-s3c24xx-fiq.S b/drivers/spi/spi-s3c24xx-fiq.S
new file mode 100644
index 000000000..059f2dc1f
--- /dev/null
+++ b/drivers/spi/spi-s3c24xx-fiq.S
@@ -0,0 +1,116 @@
+/* linux/drivers/spi/spi-s3c24xx-fiq.S
+ *
+ * Copyright 2009 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX SPI - FIQ pseudo-DMA transfer code
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+#include <mach/map.h>
+#include <mach/regs-irq.h>
+#include <plat/regs-spi.h>
+
+#include "spi-s3c24xx-fiq.h"
+
+ .text
+
+ @ entry to these routines is as follows, with the register names
+ @ defined in fiq.h so that they can be shared with the C files which
+ @ setup the calling registers.
+ @
+ @ fiq_rirq The base of the IRQ registers to find S3C2410_SRCPND
+ @ fiq_rtmp Temporary register to hold tx/rx data
+ @ fiq_rspi The base of the SPI register block
+ @ fiq_rtx The tx buffer pointer
+ @ fiq_rrx The rx buffer pointer
+ @ fiq_rcount The number of bytes to move
+
+ @ each entry starts with a word giving the length of the code and
+ @ an offset to the irq acknowledgment word
+
+ENTRY(s3c24xx_spi_fiq_rx)
+s3c24xx_spi_fix_rx:
+ .word fiq_rx_end - fiq_rx_start
+ .word fiq_rx_irq_ack - fiq_rx_start
+fiq_rx_start:
+ ldr fiq_rtmp, fiq_rx_irq_ack
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
+
+ ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
+ strb fiq_rtmp, [ fiq_rrx ], #1
+
+ mov fiq_rtmp, #0xff
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+ subnes pc, lr, #4 @@ return, still have work to do
+
+ @@ set IRQ controller so that next op will trigger IRQ
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+ subs pc, lr, #4
+
+fiq_rx_irq_ack:
+ .word 0
+fiq_rx_end:
+
+ENTRY(s3c24xx_spi_fiq_txrx)
+s3c24xx_spi_fiq_txrx:
+ .word fiq_txrx_end - fiq_txrx_start
+ .word fiq_txrx_irq_ack - fiq_txrx_start
+fiq_txrx_start:
+
+ ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
+ strb fiq_rtmp, [ fiq_rrx ], #1
+
+ ldr fiq_rtmp, fiq_txrx_irq_ack
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
+
+ ldrb fiq_rtmp, [ fiq_rtx ], #1
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+ subnes pc, lr, #4 @@ return, still have work to do
+
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+ subs pc, lr, #4
+
+fiq_txrx_irq_ack:
+ .word 0
+
+fiq_txrx_end:
+
+ENTRY(s3c24xx_spi_fiq_tx)
+s3c24xx_spi_fix_tx:
+ .word fiq_tx_end - fiq_tx_start
+ .word fiq_tx_irq_ack - fiq_tx_start
+fiq_tx_start:
+ ldrb fiq_rtmp, [ fiq_rspi, # S3C2410_SPRDAT ]
+
+ ldr fiq_rtmp, fiq_tx_irq_ack
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_SRCPND - S3C24XX_VA_IRQ ]
+
+ ldrb fiq_rtmp, [ fiq_rtx ], #1
+ strb fiq_rtmp, [ fiq_rspi, # S3C2410_SPTDAT ]
+
+ subs fiq_rcount, fiq_rcount, #1
+ subnes pc, lr, #4 @@ return, still have work to do
+
+ mov fiq_rtmp, #0
+ str fiq_rtmp, [ fiq_rirq, # S3C2410_INTMOD - S3C24XX_VA_IRQ ]
+ subs pc, lr, #4
+
+fiq_tx_irq_ack:
+ .word 0
+
+fiq_tx_end:
+
+ .end
diff --git a/drivers/spi/spi-s3c24xx-fiq.h b/drivers/spi/spi-s3c24xx-fiq.h
new file mode 100644
index 000000000..a5950bb25
--- /dev/null
+++ b/drivers/spi/spi-s3c24xx-fiq.h
@@ -0,0 +1,26 @@
+/* linux/drivers/spi/spi-s3c24xx-fiq.h
+ *
+ * Copyright 2009 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * S3C24XX SPI - FIQ pseudo-DMA transfer support
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+*/
+
+/* We have R8 through R13 to play with */
+
+#ifdef __ASSEMBLY__
+#define __REG_NR(x) r##x
+#else
+#define __REG_NR(x) (x)
+#endif
+
+#define fiq_rspi __REG_NR(8)
+#define fiq_rtmp __REG_NR(9)
+#define fiq_rrx __REG_NR(10)
+#define fiq_rtx __REG_NR(11)
+#define fiq_rcount __REG_NR(12)
+#define fiq_rirq __REG_NR(13)
diff --git a/drivers/spi/spi-s3c24xx.c b/drivers/spi/spi-s3c24xx.c
new file mode 100644
index 000000000..f747ca269
--- /dev/null
+++ b/drivers/spi/spi-s3c24xx.c
@@ -0,0 +1,673 @@
+/*
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright 2006-2009 Simtec Electronics
+ * Ben Dooks <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+*/
+
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/gpio.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/s3c24xx.h>
+#include <linux/module.h>
+
+#include <plat/regs-spi.h>
+
+#include <asm/fiq.h>
+
+#include "spi-s3c24xx-fiq.h"
+
+/**
+ * s3c24xx_spi_devstate - per device data
+ * @hz: Last frequency calculated for @sppre field.
+ * @mode: Last mode setting for the @spcon field.
+ * @spcon: Value to write to the SPCON register.
+ * @sppre: Value to write to the SPPRE register.
+ */
+struct s3c24xx_spi_devstate {
+ unsigned int hz;
+ unsigned int mode;
+ u8 spcon;
+ u8 sppre;
+};
+
+enum spi_fiq_mode {
+ FIQ_MODE_NONE = 0,
+ FIQ_MODE_TX = 1,
+ FIQ_MODE_RX = 2,
+ FIQ_MODE_TXRX = 3,
+};
+
+struct s3c24xx_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+
+ void __iomem *regs;
+ int irq;
+ int len;
+ int count;
+
+ struct fiq_handler fiq_handler;
+ enum spi_fiq_mode fiq_mode;
+ unsigned char fiq_inuse;
+ unsigned char fiq_claimed;
+
+ void (*set_cs)(struct s3c2410_spi_info *spi,
+ int cs, int pol);
+
+ /* data buffers */
+ const unsigned char *tx;
+ unsigned char *rx;
+
+ struct clk *clk;
+ struct spi_master *master;
+ struct spi_device *curdev;
+ struct device *dev;
+ struct s3c2410_spi_info *pdata;
+};
+
+#define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT)
+#define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP)
+
+static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev)
+{
+ return spi_master_get_devdata(sdev->master);
+}
+
+static void s3c24xx_spi_gpiocs(struct s3c2410_spi_info *spi, int cs, int pol)
+{
+ gpio_set_value(spi->pin_cs, pol);
+}
+
+static void s3c24xx_spi_chipsel(struct spi_device *spi, int value)
+{
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
+ struct s3c24xx_spi *hw = to_hw(spi);
+ unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0;
+
+ /* change the chipselect state and the state of the spi engine clock */
+
+ switch (value) {
+ case BITBANG_CS_INACTIVE:
+ hw->set_cs(hw->pdata, spi->chip_select, cspol^1);
+ writeb(cs->spcon, hw->regs + S3C2410_SPCON);
+ break;
+
+ case BITBANG_CS_ACTIVE:
+ writeb(cs->spcon | S3C2410_SPCON_ENSCK,
+ hw->regs + S3C2410_SPCON);
+ hw->set_cs(hw->pdata, spi->chip_select, cspol);
+ break;
+ }
+}
+
+static int s3c24xx_spi_update_state(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct s3c24xx_spi *hw = to_hw(spi);
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
+ unsigned int hz;
+ unsigned int div;
+ unsigned long clk;
+
+ hz = t ? t->speed_hz : spi->max_speed_hz;
+
+ if (!hz)
+ hz = spi->max_speed_hz;
+
+ if (spi->mode != cs->mode) {
+ u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK;
+
+ if (spi->mode & SPI_CPHA)
+ spcon |= S3C2410_SPCON_CPHA_FMTB;
+
+ if (spi->mode & SPI_CPOL)
+ spcon |= S3C2410_SPCON_CPOL_HIGH;
+
+ cs->mode = spi->mode;
+ cs->spcon = spcon;
+ }
+
+ if (cs->hz != hz) {
+ clk = clk_get_rate(hw->clk);
+ div = DIV_ROUND_UP(clk, hz * 2) - 1;
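+ /*
+ * The prescaler output is PCLK / (2 * (SPPRE + 1)), so the divider
+ * is rounded up to stay at or below the requested rate; SPPRE is
+ * an 8-bit register, hence the cap at 255 below.
+ */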
+
+ if (div > 255)
+ div = 255;
+
+ dev_dbg(&spi->dev, "pre-scaler=%d (wanted %d, got %ld)\n",
+ div, hz, clk / (2 * (div + 1)));
+
+ cs->hz = hz;
+ cs->sppre = div;
+ }
+
+ return 0;
+}
+
+static int s3c24xx_spi_setupxfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
+ struct s3c24xx_spi *hw = to_hw(spi);
+ int ret;
+
+ ret = s3c24xx_spi_update_state(spi, t);
+ if (!ret)
+ writeb(cs->sppre, hw->regs + S3C2410_SPPRE);
+
+ return ret;
+}
+
+static int s3c24xx_spi_setup(struct spi_device *spi)
+{
+ struct s3c24xx_spi_devstate *cs = spi->controller_state;
+ struct s3c24xx_spi *hw = to_hw(spi);
+ int ret;
+
+ /* allocate settings on the first call */
+ if (!cs) {
+ cs = devm_kzalloc(&spi->dev,
+ sizeof(struct s3c24xx_spi_devstate),
+ GFP_KERNEL);
+ if (!cs)
+ return -ENOMEM;
+
+ cs->spcon = SPCON_DEFAULT;
+ cs->hz = -1;
+ spi->controller_state = cs;
+ }
+
+ /* initialise the state from the device */
+ ret = s3c24xx_spi_update_state(spi, NULL);
+ if (ret)
+ return ret;
+
+ spin_lock(&hw->bitbang.lock);
+ if (!hw->bitbang.busy) {
+ hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE);
+ /* need to ndelay for 0.5 clocktick ? */
+ }
+ spin_unlock(&hw->bitbang.lock);
+
+ return 0;
+}
+
+static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count)
+{
+ return hw->tx ? hw->tx[count] : 0;
+}
+
+#ifdef CONFIG_SPI_S3C24XX_FIQ
+/* Support for FIQ based pseudo-DMA to improve the transfer speed.
+ *
+ * This code uses the assembly helper in spi-s3c24xx-fiq.S which is
+ * used by the FIQ core to move data between main memory and the peripheral
+ * block. Since this is code running on the processor, there is no problem
+ * with cache coherency of the buffers, so we can use any buffer we like.
+ */
+
+/**
+ * struct spi_fiq_code - FIQ code and header
+ * @length: The length of the code fragment, excluding this header.
+ * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at.
+ * @data: The code itself to install as a FIQ handler.
+ */
+struct spi_fiq_code {
+ u32 length;
+ u32 ack_offset;
+ u8 data[0];
+};
+
+extern struct spi_fiq_code s3c24xx_spi_fiq_txrx;
+extern struct spi_fiq_code s3c24xx_spi_fiq_tx;
+extern struct spi_fiq_code s3c24xx_spi_fiq_rx;
+
+/**
+ * ack_bit - turn IRQ into IRQ acknowledgement bit
+ * @irq: The interrupt number
+ *
+ * Returns the bit to write to the interrupt acknowledge register.
+ */
+static inline u32 ack_bit(unsigned int irq)
+{
+ return 1 << (irq - IRQ_EINT0);
+}
+
+/**
+ * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer
+ * @hw: The hardware state.
+ *
+ * Claim the FIQ handler (only one can be active at any one time) and
+ * then setup the correct transfer code for this transfer.
+ *
+ * This call updates all the necessary state information if successful,
+ * so the caller does not need to do anything more than start the transfer
+ * as normal, since the IRQ will have been re-routed to the FIQ handler.
+*/
+static void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw)
+{
+ struct pt_regs regs;
+ enum spi_fiq_mode mode;
+ struct spi_fiq_code *code;
+ int ret;
+
+ if (!hw->fiq_claimed) {
+ /* try and claim fiq if we haven't got it, and if not
+ * then return and simply use another transfer method */
+
+ ret = claim_fiq(&hw->fiq_handler);
+ if (ret)
+ return;
+ }
+
+ if (hw->tx && !hw->rx)
+ mode = FIQ_MODE_TX;
+ else if (hw->rx && !hw->tx)
+ mode = FIQ_MODE_RX;
+ else
+ mode = FIQ_MODE_TXRX;
+
+ regs.uregs[fiq_rspi] = (long)hw->regs;
+ regs.uregs[fiq_rrx] = (long)hw->rx;
+ regs.uregs[fiq_rtx] = (long)hw->tx + 1;
+ regs.uregs[fiq_rcount] = hw->len - 1;
+ regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ;
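+ /*
+ * The TX pointer and count start one byte in: the first byte is
+ * written by s3c24xx_spi_txrx() itself to start the transfer, and
+ * the FIQ handler then receives it and sends the remainder.
+ */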
+
+ set_fiq_regs(&regs);
+
+ if (hw->fiq_mode != mode) {
+ u32 *ack_ptr;
+
+ hw->fiq_mode = mode;
+
+ switch (mode) {
+ case FIQ_MODE_TX:
+ code = &s3c24xx_spi_fiq_tx;
+ break;
+ case FIQ_MODE_RX:
+ code = &s3c24xx_spi_fiq_rx;
+ break;
+ case FIQ_MODE_TXRX:
+ code = &s3c24xx_spi_fiq_txrx;
+ break;
+ default:
+ code = NULL;
+ }
+
+ BUG_ON(!code);
+
+ ack_ptr = (u32 *)&code->data[code->ack_offset];
+ *ack_ptr = ack_bit(hw->irq);
+
+ set_fiq_handler(&code->data, code->length);
+ }
+
+ s3c24xx_set_fiq(hw->irq, true);
+
+ hw->fiq_mode = mode;
+ hw->fiq_inuse = 1;
+}
+
+/**
+ * s3c24xx_spi_fiqop - FIQ core code callback
+ * @pw: Data registered with the handler
+ * @release: Whether this is a release or a return.
+ *
+ * Called by the FIQ code when another module wants to use the FIQ, so
+ * return whether we are currently using this or not and then update our
+ * internal state.
+ */
+static int s3c24xx_spi_fiqop(void *pw, int release)
+{
+ struct s3c24xx_spi *hw = pw;
+ int ret = 0;
+
+ if (release) {
+ if (hw->fiq_inuse)
+ ret = -EBUSY;
+
+ /* note, we do not need to unroute the FIQ, as the FIQ
+ * vector code de-routes it to signal the end of transfer */
+
+ hw->fiq_mode = FIQ_MODE_NONE;
+ hw->fiq_claimed = 0;
+ } else {
+ hw->fiq_claimed = 1;
+ }
+
+ return ret;
+}
+
+/**
+ * s3c24xx_spi_initfiq - setup the information for the FIQ core
+ * @hw: The hardware state.
+ *
+ * Setup the fiq_handler block to pass to the FIQ core.
+ */
+static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw)
+{
+ hw->fiq_handler.dev_id = hw;
+ hw->fiq_handler.name = dev_name(hw->dev);
+ hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop;
+}
+
+/**
+ * s3c24xx_spi_usefiq - return if we should be using FIQ.
+ * @hw: The hardware state.
+ *
+ * Return true if the platform data allows this channel to use the FIQ.
+ */
+static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw)
+{
+ return hw->pdata->use_fiq;
+}
+
+/**
+ * s3c24xx_spi_usingfiq - return if channel is using FIQ
+ * @spi: The hardware state.
+ *
+ * Return whether the channel is currently using the FIQ (separate from
+ * whether the FIQ is claimed).
+ */
+static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi)
+{
+ return spi->fiq_inuse;
+}
+#else
+
+static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { }
+static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { }
+static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; }
+static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; }
+
+#endif /* CONFIG_SPI_S3C24XX_FIQ */
+
+static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct s3c24xx_spi *hw = to_hw(spi);
+
+ hw->tx = t->tx_buf;
+ hw->rx = t->rx_buf;
+ hw->len = t->len;
+ hw->count = 0;
+
+ init_completion(&hw->done);
+
+ hw->fiq_inuse = 0;
+ if (s3c24xx_spi_usefiq(hw) && t->len >= 3)
+ s3c24xx_spi_tryfiq(hw);
+
+ /* send the first byte */
+ writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT);
+
+ wait_for_completion(&hw->done);
+ return hw->count;
+}
+
+static irqreturn_t s3c24xx_spi_irq(int irq, void *dev)
+{
+ struct s3c24xx_spi *hw = dev;
+ unsigned int spsta = readb(hw->regs + S3C2410_SPSTA);
+ unsigned int count = hw->count;
+
+ if (spsta & S3C2410_SPSTA_DCOL) {
+ dev_dbg(hw->dev, "data-collision\n");
+ complete(&hw->done);
+ goto irq_done;
+ }
+
+ if (!(spsta & S3C2410_SPSTA_READY)) {
+ dev_dbg(hw->dev, "spi not ready for tx?\n");
+ complete(&hw->done);
+ goto irq_done;
+ }
+
+ if (!s3c24xx_spi_usingfiq(hw)) {
+ hw->count++;
+
+ if (hw->rx)
+ hw->rx[count] = readb(hw->regs + S3C2410_SPRDAT);
+
+ count++;
+
+ if (count < hw->len)
+ writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT);
+ else
+ complete(&hw->done);
+ } else {
+ hw->count = hw->len;
+ hw->fiq_inuse = 0;
+
+ if (hw->rx)
+ hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT);
+
+ complete(&hw->done);
+ }
+
+ irq_done:
+ return IRQ_HANDLED;
+}
+
+static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw)
+{
+ /* for the moment, permanently enable the clock */
+
+ clk_enable(hw->clk);
+
+ /* program defaults into the registers */
+
+ writeb(0xff, hw->regs + S3C2410_SPPRE);
+ writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN);
+ writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON);
+
+ if (hw->pdata) {
+ if (hw->set_cs == s3c24xx_spi_gpiocs)
+ gpio_direction_output(hw->pdata->pin_cs, 1);
+
+ if (hw->pdata->gpio_setup)
+ hw->pdata->gpio_setup(hw->pdata, 1);
+ }
+}
+
+static int s3c24xx_spi_probe(struct platform_device *pdev)
+{
+ struct s3c2410_spi_info *pdata;
+ struct s3c24xx_spi *hw;
+ struct spi_master *master;
+ struct resource *res;
+ int err = 0;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "No memory for spi_master\n");
+ return -ENOMEM;
+ }
+
+ hw = spi_master_get_devdata(master);
+ memset(hw, 0, sizeof(struct s3c24xx_spi));
+
+ hw->master = master;
+ hw->pdata = pdata = dev_get_platdata(&pdev->dev);
+ hw->dev = &pdev->dev;
+
+ if (pdata == NULL) {
+ dev_err(&pdev->dev, "No platform data supplied\n");
+ err = -ENOENT;
+ goto err_no_pdata;
+ }
+
+ platform_set_drvdata(pdev, hw);
+ init_completion(&hw->done);
+
+ /* initialise fiq handler */
+
+ s3c24xx_spi_initfiq(hw);
+
+ /* setup the master state. */
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+
+ master->num_chipselect = hw->pdata->num_cs;
+ master->bus_num = pdata->bus_num;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+
+ /* setup the state for the bitbang driver */
+
+ hw->bitbang.master = hw->master;
+ hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer;
+ hw->bitbang.chipselect = s3c24xx_spi_chipsel;
+ hw->bitbang.txrx_bufs = s3c24xx_spi_txrx;
+
+ hw->master->setup = s3c24xx_spi_setup;
+
+ dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang);
+
+ /* find and map our resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ hw->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(hw->regs)) {
+ err = PTR_ERR(hw->regs);
+ goto err_no_pdata;
+ }
+
+ hw->irq = platform_get_irq(pdev, 0);
+ if (hw->irq < 0) {
+ dev_err(&pdev->dev, "No IRQ specified\n");
+ err = -ENOENT;
+ goto err_no_pdata;
+ }
+
+ err = devm_request_irq(&pdev->dev, hw->irq, s3c24xx_spi_irq, 0,
+ pdev->name, hw);
+ if (err) {
+ dev_err(&pdev->dev, "Cannot claim IRQ\n");
+ goto err_no_pdata;
+ }
+
+ hw->clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(hw->clk)) {
+ dev_err(&pdev->dev, "No clock for device\n");
+ err = PTR_ERR(hw->clk);
+ goto err_no_pdata;
+ }
+
+ /* setup any gpio we can */
+
+ if (!pdata->set_cs) {
+ if (pdata->pin_cs < 0) {
+ dev_err(&pdev->dev, "No chipselect pin\n");
+ err = -EINVAL;
+ goto err_register;
+ }
+
+ err = devm_gpio_request(&pdev->dev, pdata->pin_cs,
+ dev_name(&pdev->dev));
+ if (err) {
+ dev_err(&pdev->dev, "Failed to get gpio for cs\n");
+ goto err_register;
+ }
+
+ hw->set_cs = s3c24xx_spi_gpiocs;
+ gpio_direction_output(pdata->pin_cs, 1);
+ } else
+ hw->set_cs = pdata->set_cs;
+
+ s3c24xx_spi_initialsetup(hw);
+
+ /* register our spi controller */
+
+ err = spi_bitbang_start(&hw->bitbang);
+ if (err) {
+ dev_err(&pdev->dev, "Failed to register SPI master\n");
+ goto err_register;
+ }
+
+ return 0;
+
+ err_register:
+ clk_disable(hw->clk);
+
+ err_no_pdata:
+ spi_master_put(hw->master);
+ return err;
+}
+
+static int s3c24xx_spi_remove(struct platform_device *dev)
+{
+ struct s3c24xx_spi *hw = platform_get_drvdata(dev);
+
+ spi_bitbang_stop(&hw->bitbang);
+ clk_disable(hw->clk);
+ spi_master_put(hw->master);
+ return 0;
+}
+
+
+#ifdef CONFIG_PM
+
+static int s3c24xx_spi_suspend(struct device *dev)
+{
+ struct s3c24xx_spi *hw = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(hw->master);
+ if (ret)
+ return ret;
+
+ if (hw->pdata && hw->pdata->gpio_setup)
+ hw->pdata->gpio_setup(hw->pdata, 0);
+
+ clk_disable(hw->clk);
+ return 0;
+}
+
+static int s3c24xx_spi_resume(struct device *dev)
+{
+ struct s3c24xx_spi *hw = dev_get_drvdata(dev);
+
+ s3c24xx_spi_initialsetup(hw);
+ return spi_master_resume(hw->master);
+}
+
+static const struct dev_pm_ops s3c24xx_spi_pmops = {
+ .suspend = s3c24xx_spi_suspend,
+ .resume = s3c24xx_spi_resume,
+};
+
+#define S3C24XX_SPI_PMOPS &s3c24xx_spi_pmops
+#else
+#define S3C24XX_SPI_PMOPS NULL
+#endif /* CONFIG_PM */
+
+MODULE_ALIAS("platform:s3c2410-spi");
+static struct platform_driver s3c24xx_spi_driver = {
+ .probe = s3c24xx_spi_probe,
+ .remove = s3c24xx_spi_remove,
+ .driver = {
+ .name = "s3c2410-spi",
+ .pm = S3C24XX_SPI_PMOPS,
+ },
+};
+module_platform_driver(s3c24xx_spi_driver);
+
+MODULE_DESCRIPTION("S3C24XX SPI Driver");
+MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-s3c64xx.c b/drivers/spi/spi-s3c64xx.c
new file mode 100644
index 000000000..b1c6731fb
--- /dev/null
+++ b/drivers/spi/spi-s3c64xx.c
@@ -0,0 +1,1406 @@
+/*
+ * Copyright (C) 2009 Samsung Electronics Ltd.
+ * Jaswinder Singh <jassi.brar@samsung.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/clk.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+
+#include <linux/platform_data/spi-s3c64xx.h>
+
+#define MAX_SPI_PORTS 6
+#define S3C64XX_SPI_QUIRK_POLL (1 << 0)
+#define S3C64XX_SPI_QUIRK_CS_AUTO (1 << 1)
+
+/* Registers and bit-fields */
+
+#define S3C64XX_SPI_CH_CFG 0x00
+#define S3C64XX_SPI_CLK_CFG 0x04
+#define S3C64XX_SPI_MODE_CFG 0x08
+#define S3C64XX_SPI_SLAVE_SEL 0x0C
+#define S3C64XX_SPI_INT_EN 0x10
+#define S3C64XX_SPI_STATUS 0x14
+#define S3C64XX_SPI_TX_DATA 0x18
+#define S3C64XX_SPI_RX_DATA 0x1C
+#define S3C64XX_SPI_PACKET_CNT 0x20
+#define S3C64XX_SPI_PENDING_CLR 0x24
+#define S3C64XX_SPI_SWAP_CFG 0x28
+#define S3C64XX_SPI_FB_CLK 0x2C
+
+#define S3C64XX_SPI_CH_HS_EN (1<<6) /* High Speed Enable */
+#define S3C64XX_SPI_CH_SW_RST (1<<5)
+#define S3C64XX_SPI_CH_SLAVE (1<<4)
+#define S3C64XX_SPI_CPOL_L (1<<3)
+#define S3C64XX_SPI_CPHA_B (1<<2)
+#define S3C64XX_SPI_CH_RXCH_ON (1<<1)
+#define S3C64XX_SPI_CH_TXCH_ON (1<<0)
+
+#define S3C64XX_SPI_CLKSEL_SRCMSK (3<<9)
+#define S3C64XX_SPI_CLKSEL_SRCSHFT 9
+#define S3C64XX_SPI_ENCLK_ENABLE (1<<8)
+#define S3C64XX_SPI_PSR_MASK 0xff
+
+#define S3C64XX_SPI_MODE_CH_TSZ_BYTE (0<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_HALFWORD (1<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_WORD (2<<29)
+#define S3C64XX_SPI_MODE_CH_TSZ_MASK (3<<29)
+#define S3C64XX_SPI_MODE_BUS_TSZ_BYTE (0<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD (1<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_WORD (2<<17)
+#define S3C64XX_SPI_MODE_BUS_TSZ_MASK (3<<17)
+#define S3C64XX_SPI_MODE_RXDMA_ON (1<<2)
+#define S3C64XX_SPI_MODE_TXDMA_ON (1<<1)
+#define S3C64XX_SPI_MODE_4BURST (1<<0)
+
+#define S3C64XX_SPI_SLAVE_AUTO (1<<1)
+#define S3C64XX_SPI_SLAVE_SIG_INACT (1<<0)
+#define S3C64XX_SPI_SLAVE_NSC_CNT_2 (2<<4)
+
+#define S3C64XX_SPI_INT_TRAILING_EN (1<<6)
+#define S3C64XX_SPI_INT_RX_OVERRUN_EN (1<<5)
+#define S3C64XX_SPI_INT_RX_UNDERRUN_EN (1<<4)
+#define S3C64XX_SPI_INT_TX_OVERRUN_EN (1<<3)
+#define S3C64XX_SPI_INT_TX_UNDERRUN_EN (1<<2)
+#define S3C64XX_SPI_INT_RX_FIFORDY_EN (1<<1)
+#define S3C64XX_SPI_INT_TX_FIFORDY_EN (1<<0)
+
+#define S3C64XX_SPI_ST_RX_OVERRUN_ERR (1<<5)
+#define S3C64XX_SPI_ST_RX_UNDERRUN_ERR (1<<4)
+#define S3C64XX_SPI_ST_TX_OVERRUN_ERR (1<<3)
+#define S3C64XX_SPI_ST_TX_UNDERRUN_ERR (1<<2)
+#define S3C64XX_SPI_ST_RX_FIFORDY (1<<1)
+#define S3C64XX_SPI_ST_TX_FIFORDY (1<<0)
+
+#define S3C64XX_SPI_PACKET_CNT_EN (1<<16)
+
+#define S3C64XX_SPI_PND_TX_UNDERRUN_CLR (1<<4)
+#define S3C64XX_SPI_PND_TX_OVERRUN_CLR (1<<3)
+#define S3C64XX_SPI_PND_RX_UNDERRUN_CLR (1<<2)
+#define S3C64XX_SPI_PND_RX_OVERRUN_CLR (1<<1)
+#define S3C64XX_SPI_PND_TRAILING_CLR (1<<0)
+
+#define S3C64XX_SPI_SWAP_RX_HALF_WORD (1<<7)
+#define S3C64XX_SPI_SWAP_RX_BYTE (1<<6)
+#define S3C64XX_SPI_SWAP_RX_BIT (1<<5)
+#define S3C64XX_SPI_SWAP_RX_EN (1<<4)
+#define S3C64XX_SPI_SWAP_TX_HALF_WORD (1<<3)
+#define S3C64XX_SPI_SWAP_TX_BYTE (1<<2)
+#define S3C64XX_SPI_SWAP_TX_BIT (1<<1)
+#define S3C64XX_SPI_SWAP_TX_EN (1<<0)
+
+#define S3C64XX_SPI_FBCLK_MSK (3<<0)
+
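+/*
+ * FIFO level handling: the TX level always sits at bit 6 of SPI_STATUS,
+ * while the RX level offset and the field width (and hence the FIFO
+ * depth) differ per port, so both come from the port configuration.
+ */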
+#define FIFO_LVL_MASK(i) ((i)->port_conf->fifo_lvl_mask[i->port_id])
+#define S3C64XX_SPI_ST_TX_DONE(v, i) (((v) & \
+ (1 << (i)->port_conf->tx_st_done)) ? 1 : 0)
+#define TX_FIFO_LVL(v, i) (((v) >> 6) & FIFO_LVL_MASK(i))
+#define RX_FIFO_LVL(v, i) (((v) >> (i)->port_conf->rx_lvl_offset) & \
+ FIFO_LVL_MASK(i))
+
+#define S3C64XX_SPI_MAX_TRAILCNT 0x3ff
+#define S3C64XX_SPI_TRAILCNT_OFF 19
+
+#define S3C64XX_SPI_TRAILCNT S3C64XX_SPI_MAX_TRAILCNT
+
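+/* Rough millisecond to busy-wait loop count conversion via loops_per_jiffy */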
+#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t)
+#define is_polling(x) (x->port_conf->quirks & S3C64XX_SPI_QUIRK_POLL)
+
+#define RXBUSY (1<<2)
+#define TXBUSY (1<<3)
+
+struct s3c64xx_spi_dma_data {
+ struct dma_chan *ch;
+ enum dma_transfer_direction direction;
+ unsigned int dmach;
+};
+
+/**
+ * struct s3c64xx_spi_port_config - SPI Controller hardware info
+ * @fifo_lvl_mask: Bit-mask for {TX|RX}_FIFO_LVL bits in SPI_STATUS register.
+ * @rx_lvl_offset: Bit offset of RX_FIFO_LVL bits in SPI_STATUS register.
+ * @tx_st_done: Bit offset of TX_DONE bit in SPI_STATUS register.
+ * @quirks: Bitmask of quirks (S3C64XX_SPI_QUIRK_*) for this controller.
+ * @high_speed: True, if the controller supports HIGH_SPEED_EN bit.
+ * @clk_from_cmu: True, if the controller does not include a clock mux and
+ * prescaler unit.
+ *
+ * The Samsung s3c64xx SPI controllers are used on various Samsung SoCs but
+ * differ in some aspects, such as the size of the FIFO and the SPI bus clock
+ * setup. Such differences are specified to the driver using this structure,
+ * which is provided as driver data to the driver.
+ */
+struct s3c64xx_spi_port_config {
+ int fifo_lvl_mask[MAX_SPI_PORTS];
+ int rx_lvl_offset;
+ int tx_st_done;
+ int quirks;
+ bool high_speed;
+ bool clk_from_cmu;
+};
+
+/**
+ * struct s3c64xx_spi_driver_data - Runtime info holder for SPI driver.
+ * @clk: Pointer to the spi clock.
+ * @src_clk: Pointer to the clock used to generate SPI signals.
+ * @master: Pointer to the SPI Protocol master.
+ * @cntrlr_info: Platform specific data for the controller this driver manages.
+ * @tgl_spi: Pointer to the last CS left untoggled by the cs_change hint.
+ * @lock: Controller specific lock.
+ * @state: Set of FLAGS to indicate status.
+ * @rx_dma: DMA state for the Rx channel.
+ * @tx_dma: DMA state for the Tx channel.
+ * @sfr_start: BUS address of SPI controller regs.
+ * @regs: Pointer to ioremap'ed controller registers.
+ * @pdev: Pointer to the owning platform device.
+ * @xfer_completion: To indicate completion of xfer task.
+ * @cur_mode: Stores the active configuration of the controller.
+ * @cur_bpw: Stores the active bits per word settings.
+ * @cur_speed: Stores the active xfer clock speed.
+ * @port_conf: Hardware configuration for the controller port in use.
+ * @port_id: Index of this port in the per-port configuration arrays.
+ */
+struct s3c64xx_spi_driver_data {
+ void __iomem *regs;
+ struct clk *clk;
+ struct clk *src_clk;
+ struct platform_device *pdev;
+ struct spi_master *master;
+ struct s3c64xx_spi_info *cntrlr_info;
+ struct spi_device *tgl_spi;
+ spinlock_t lock;
+ unsigned long sfr_start;
+ struct completion xfer_completion;
+ unsigned state;
+ unsigned cur_mode, cur_bpw;
+ unsigned cur_speed;
+ struct s3c64xx_spi_dma_data rx_dma;
+ struct s3c64xx_spi_dma_data tx_dma;
+ struct s3c64xx_spi_port_config *port_conf;
+ unsigned int port_id;
+};
+
+static void flush_fifo(struct s3c64xx_spi_driver_data *sdd)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long loops;
+ u32 val;
+
+ writel(0, regs + S3C64XX_SPI_PACKET_CNT);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~(S3C64XX_SPI_CH_RXCH_ON | S3C64XX_SPI_CH_TXCH_ON);
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val |= S3C64XX_SPI_CH_SW_RST;
+ val &= ~S3C64XX_SPI_CH_HS_EN;
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ /* Flush TxFIFO */
+ loops = msecs_to_loops(1);
+ do {
+ val = readl(regs + S3C64XX_SPI_STATUS);
+ } while (TX_FIFO_LVL(val, sdd) && loops--);
+
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing TX FIFO\n");
+
+ /* Flush RxFIFO */
+ loops = msecs_to_loops(1);
+ do {
+ val = readl(regs + S3C64XX_SPI_STATUS);
+ if (RX_FIFO_LVL(val, sdd))
+ readl(regs + S3C64XX_SPI_RX_DATA);
+ else
+ break;
+ } while (loops--);
+
+ if (loops == 0)
+ dev_warn(&sdd->pdev->dev, "Timed out flushing RX FIFO\n");
+
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~S3C64XX_SPI_CH_SW_RST;
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+}
+
+static void s3c64xx_spi_dmacb(void *data)
+{
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_dma_data *dma = data;
+ unsigned long flags;
+
+ if (dma->direction == DMA_DEV_TO_MEM)
+ sdd = container_of(data,
+ struct s3c64xx_spi_driver_data, rx_dma);
+ else
+ sdd = container_of(data,
+ struct s3c64xx_spi_driver_data, tx_dma);
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+ if (dma->direction == DMA_DEV_TO_MEM) {
+ sdd->state &= ~RXBUSY;
+ if (!(sdd->state & TXBUSY))
+ complete(&sdd->xfer_completion);
+ } else {
+ sdd->state &= ~TXBUSY;
+ if (!(sdd->state & RXBUSY))
+ complete(&sdd->xfer_completion);
+ }
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+}
+
+static void prepare_dma(struct s3c64xx_spi_dma_data *dma,
+ struct sg_table *sgt)
+{
+ struct s3c64xx_spi_driver_data *sdd;
+ struct dma_slave_config config;
+ struct dma_async_tx_descriptor *desc;
+
+ memset(&config, 0, sizeof(config));
+
+ if (dma->direction == DMA_DEV_TO_MEM) {
+ sdd = container_of((void *)dma,
+ struct s3c64xx_spi_driver_data, rx_dma);
+ config.direction = dma->direction;
+ config.src_addr = sdd->sfr_start + S3C64XX_SPI_RX_DATA;
+ config.src_addr_width = sdd->cur_bpw / 8;
+ config.src_maxburst = 1;
+ dmaengine_slave_config(dma->ch, &config);
+ } else {
+ sdd = container_of((void *)dma,
+ struct s3c64xx_spi_driver_data, tx_dma);
+ config.direction = dma->direction;
+ config.dst_addr = sdd->sfr_start + S3C64XX_SPI_TX_DATA;
+ config.dst_addr_width = sdd->cur_bpw / 8;
+ config.dst_maxburst = 1;
+ dmaengine_slave_config(dma->ch, &config);
+ }
+
+ desc = dmaengine_prep_slave_sg(dma->ch, sgt->sgl, sgt->nents,
+ dma->direction, DMA_PREP_INTERRUPT);
+
+ desc->callback = s3c64xx_spi_dmacb;
+ desc->callback_param = dma;
+
+ dmaengine_submit(desc);
+ dma_async_issue_pending(dma->ch);
+}
+
+static int s3c64xx_spi_prepare_transfer(struct spi_master *spi)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+ dma_filter_fn filter = sdd->cntrlr_info->filter;
+ struct device *dev = &sdd->pdev->dev;
+ dma_cap_mask_t mask;
+ int ret;
+
+ if (!is_polling(sdd)) {
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Acquire DMA channels */
+ sdd->rx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+ (void *)(long)sdd->rx_dma.dmach, dev, "rx");
+ if (!sdd->rx_dma.ch) {
+ dev_err(dev, "Failed to get RX DMA channel\n");
+ ret = -EBUSY;
+ goto out;
+ }
+ spi->dma_rx = sdd->rx_dma.ch;
+
+ sdd->tx_dma.ch = dma_request_slave_channel_compat(mask, filter,
+ (void *)(long)sdd->tx_dma.dmach, dev, "tx");
+ if (!sdd->tx_dma.ch) {
+ dev_err(dev, "Failed to get TX DMA channel\n");
+ ret = -EBUSY;
+ goto out_rx;
+ }
+ spi->dma_tx = sdd->tx_dma.ch;
+ }
+
+ return 0;
+
+out_rx:
+ dma_release_channel(sdd->rx_dma.ch);
+out:
+ return ret;
+}
+
+static int s3c64xx_spi_unprepare_transfer(struct spi_master *spi)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(spi);
+
+ /* Free DMA channels */
+ if (!is_polling(sdd)) {
+ dma_release_channel(sdd->rx_dma.ch);
+ dma_release_channel(sdd->tx_dma.ch);
+ }
+
+ return 0;
+}
+
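+/*
+ * (FIFO_LVL_MASK(sdd) >> 1) + 1 is the usable FIFO depth for this port;
+ * DMA is only worth setting up for transfers that cannot fit into the
+ * FIFO in one go, smaller ones are done by PIO.
+ */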
+static bool s3c64xx_spi_can_dma(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+ return xfer->len > (FIFO_LVL_MASK(sdd) >> 1) + 1;
+}
+
+static void enable_datapath(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_device *spi,
+ struct spi_transfer *xfer, int dma_mode)
+{
+ void __iomem *regs = sdd->regs;
+ u32 modecfg, chcfg;
+
+ modecfg = readl(regs + S3C64XX_SPI_MODE_CFG);
+ modecfg &= ~(S3C64XX_SPI_MODE_TXDMA_ON | S3C64XX_SPI_MODE_RXDMA_ON);
+
+ chcfg = readl(regs + S3C64XX_SPI_CH_CFG);
+ chcfg &= ~S3C64XX_SPI_CH_TXCH_ON;
+
+ if (dma_mode) {
+ chcfg &= ~S3C64XX_SPI_CH_RXCH_ON;
+ } else {
+ /*
+ * Always shift data into the RX FIFO, even if the xfer is
+ * Tx only; this lets the PCKT_CNT value generate exactly
+ * the number of clocks needed.
+ */
+ chcfg |= S3C64XX_SPI_CH_RXCH_ON;
+ writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
+ | S3C64XX_SPI_PACKET_CNT_EN,
+ regs + S3C64XX_SPI_PACKET_CNT);
+ }
+
+ if (xfer->tx_buf != NULL) {
+ sdd->state |= TXBUSY;
+ chcfg |= S3C64XX_SPI_CH_TXCH_ON;
+ if (dma_mode) {
+ modecfg |= S3C64XX_SPI_MODE_TXDMA_ON;
+ prepare_dma(&sdd->tx_dma, &xfer->tx_sg);
+ } else {
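+			/* PIO: write the buffer into the TX FIFO word by word */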
+ switch (sdd->cur_bpw) {
+ case 32:
+ iowrite32_rep(regs + S3C64XX_SPI_TX_DATA,
+ xfer->tx_buf, xfer->len / 4);
+ break;
+ case 16:
+ iowrite16_rep(regs + S3C64XX_SPI_TX_DATA,
+ xfer->tx_buf, xfer->len / 2);
+ break;
+ default:
+ iowrite8_rep(regs + S3C64XX_SPI_TX_DATA,
+ xfer->tx_buf, xfer->len);
+ break;
+ }
+ }
+ }
+
+ if (xfer->rx_buf != NULL) {
+ sdd->state |= RXBUSY;
+
+ if (sdd->port_conf->high_speed && sdd->cur_speed >= 30000000UL
+ && !(sdd->cur_mode & SPI_CPHA))
+ chcfg |= S3C64XX_SPI_CH_HS_EN;
+
+ if (dma_mode) {
+ modecfg |= S3C64XX_SPI_MODE_RXDMA_ON;
+ chcfg |= S3C64XX_SPI_CH_RXCH_ON;
+ writel(((xfer->len * 8 / sdd->cur_bpw) & 0xffff)
+ | S3C64XX_SPI_PACKET_CNT_EN,
+ regs + S3C64XX_SPI_PACKET_CNT);
+ prepare_dma(&sdd->rx_dma, &xfer->rx_sg);
+ }
+ }
+
+ writel(modecfg, regs + S3C64XX_SPI_MODE_CFG);
+ writel(chcfg, regs + S3C64XX_SPI_CH_CFG);
+}
+
+static u32 s3c64xx_spi_wait_for_timeout(struct s3c64xx_spi_driver_data *sdd,
+ int timeout_ms)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long val = 1;
+ u32 status;
+
+ /* max fifo depth available */
+ u32 max_fifo = (FIFO_LVL_MASK(sdd) >> 1) + 1;
+
+ if (timeout_ms)
+ val = msecs_to_loops(timeout_ms);
+
+ do {
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sdd) < max_fifo && --val);
+
+ /* return the actual received data length */
+ return RX_FIFO_LVL(status, sdd);
+}
+
+static int wait_for_dma(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long val;
+ u32 status;
+ int ms;
+
+ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+ ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ ms += 10; /* some tolerance */
+
+ val = msecs_to_jiffies(ms) + 10;
+ val = wait_for_completion_timeout(&sdd->xfer_completion, val);
+
+	/*
+	 * If the previous xfer completed within the timeout, proceed;
+	 * otherwise return -EIO.
+	 * DMA Tx completes as soon as the data has been written into
+	 * the FIFO, without waiting for the real transmission on the
+	 * bus to finish. DMA Rx completes only once the DMA has read
+	 * the data back out of the FIFO, which requires the bus
+	 * transmission to finish, so no extra wait is needed when the
+	 * xfer involved Rx (with or without Tx).
+	 */
+ if (val && !xfer->rx_buf) {
+ val = msecs_to_loops(10);
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ while ((TX_FIFO_LVL(status, sdd)
+ || !S3C64XX_SPI_ST_TX_DONE(status, sdd))
+ && --val) {
+ cpu_relax();
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ }
+
+ }
+
+ /* If timed out while checking rx/tx status return error */
+ if (!val)
+ return -EIO;
+
+ return 0;
+}
+
+static int wait_for_pio(struct s3c64xx_spi_driver_data *sdd,
+ struct spi_transfer *xfer)
+{
+ void __iomem *regs = sdd->regs;
+ unsigned long val;
+ u32 status;
+ int loops;
+ u32 cpy_len;
+ u8 *buf;
+ int ms;
+
+ /* millisecs to xfer 'len' bytes @ 'cur_speed' */
+ ms = xfer->len * 8 * 1000 / sdd->cur_speed;
+ ms += 10; /* some tolerance */
+
+ val = msecs_to_loops(ms);
+ do {
+ status = readl(regs + S3C64XX_SPI_STATUS);
+ } while (RX_FIFO_LVL(status, sdd) < xfer->len && --val);
+
+ /* If it was only Tx */
+ if (!xfer->rx_buf) {
+ sdd->state &= ~TXBUSY;
+ return 0;
+ }
+
+	/*
+	 * If the receive length is bigger than the controller FIFO
+	 * size, compute the number of loops and read the FIFO that
+	 * many times: loops = length / max FIFO size (derived from
+	 * the FIFO level mask).
+	 * For any size smaller than the FIFO size, the code below is
+	 * executed at least once.
+	 */
+ loops = xfer->len / ((FIFO_LVL_MASK(sdd) >> 1) + 1);
+ buf = xfer->rx_buf;
+ do {
+ /* wait for data to be received in the fifo */
+ cpy_len = s3c64xx_spi_wait_for_timeout(sdd,
+ (loops ? ms : 0));
+
+ switch (sdd->cur_bpw) {
+ case 32:
+ ioread32_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 4);
+ break;
+ case 16:
+ ioread16_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len / 2);
+ break;
+ default:
+ ioread8_rep(regs + S3C64XX_SPI_RX_DATA,
+ buf, cpy_len);
+ break;
+ }
+
+ buf = buf + cpy_len;
+ } while (loops--);
+ sdd->state &= ~RXBUSY;
+
+ return 0;
+}
+
+static void s3c64xx_spi_config(struct s3c64xx_spi_driver_data *sdd)
+{
+ void __iomem *regs = sdd->regs;
+ u32 val;
+
+ /* Disable Clock */
+ if (sdd->port_conf->clk_from_cmu) {
+ clk_disable_unprepare(sdd->src_clk);
+ } else {
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val &= ~S3C64XX_SPI_ENCLK_ENABLE;
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+ }
+
+ /* Set Polarity and Phase */
+ val = readl(regs + S3C64XX_SPI_CH_CFG);
+ val &= ~(S3C64XX_SPI_CH_SLAVE |
+ S3C64XX_SPI_CPOL_L |
+ S3C64XX_SPI_CPHA_B);
+
+ if (sdd->cur_mode & SPI_CPOL)
+ val |= S3C64XX_SPI_CPOL_L;
+
+ if (sdd->cur_mode & SPI_CPHA)
+ val |= S3C64XX_SPI_CPHA_B;
+
+ writel(val, regs + S3C64XX_SPI_CH_CFG);
+
+ /* Set Channel & DMA Mode */
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~(S3C64XX_SPI_MODE_BUS_TSZ_MASK
+ | S3C64XX_SPI_MODE_CH_TSZ_MASK);
+
+ switch (sdd->cur_bpw) {
+ case 32:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_WORD;
+ val |= S3C64XX_SPI_MODE_CH_TSZ_WORD;
+ break;
+ case 16:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_HALFWORD;
+ val |= S3C64XX_SPI_MODE_CH_TSZ_HALFWORD;
+ break;
+ default:
+ val |= S3C64XX_SPI_MODE_BUS_TSZ_BYTE;
+ val |= S3C64XX_SPI_MODE_CH_TSZ_BYTE;
+ break;
+ }
+
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+
+ if (sdd->port_conf->clk_from_cmu) {
+ /* Configure Clock */
+ /* There is half-multiplier before the SPI */
+ clk_set_rate(sdd->src_clk, sdd->cur_speed * 2);
+ /* Enable Clock */
+ clk_prepare_enable(sdd->src_clk);
+ } else {
+ /* Configure Clock */
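+		/* SPI clock = src_clk / 2 / (PSR + 1) */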
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val &= ~S3C64XX_SPI_PSR_MASK;
+ val |= ((clk_get_rate(sdd->src_clk) / sdd->cur_speed / 2 - 1)
+ & S3C64XX_SPI_PSR_MASK);
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+
+ /* Enable Clock */
+ val = readl(regs + S3C64XX_SPI_CLK_CFG);
+ val |= S3C64XX_SPI_ENCLK_ENABLE;
+ writel(val, regs + S3C64XX_SPI_CLK_CFG);
+ }
+}
+
+#define XFER_DMAADDR_INVALID DMA_BIT_MASK(32)
+
+static int s3c64xx_spi_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+ struct s3c64xx_spi_csinfo *cs = spi->controller_data;
+
+	/* If the master's (controller) state differs from what the slave needs */
+ if (sdd->cur_speed != spi->max_speed_hz
+ || sdd->cur_mode != spi->mode
+ || sdd->cur_bpw != spi->bits_per_word) {
+ sdd->cur_bpw = spi->bits_per_word;
+ sdd->cur_speed = spi->max_speed_hz;
+ sdd->cur_mode = spi->mode;
+ s3c64xx_spi_config(sdd);
+ }
+
+ /* Configure feedback delay */
+ writel(cs->fb_delay & 0x3, sdd->regs + S3C64XX_SPI_FB_CLK);
+
+ return 0;
+}
+
+static int s3c64xx_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ int status;
+ u32 speed;
+ u8 bpw;
+ unsigned long flags;
+ int use_dma;
+
+ reinit_completion(&sdd->xfer_completion);
+
+ /* Only BPW and Speed may change across transfers */
+ bpw = xfer->bits_per_word;
+ speed = xfer->speed_hz ? : spi->max_speed_hz;
+
+ if (bpw != sdd->cur_bpw || speed != sdd->cur_speed) {
+ sdd->cur_bpw = bpw;
+ sdd->cur_speed = speed;
+ s3c64xx_spi_config(sdd);
+ }
+
+	/* Poll xfers that fit in the FIFO; use DMA for anything bigger */
+ use_dma = 0;
+ if (!is_polling(sdd) &&
+ (sdd->rx_dma.ch && sdd->tx_dma.ch &&
+ (xfer->len > ((FIFO_LVL_MASK(sdd) >> 1) + 1))))
+ use_dma = 1;
+
+ spin_lock_irqsave(&sdd->lock, flags);
+
+	/* Clear stale busy flags before starting the new transfer */
+ sdd->state &= ~RXBUSY;
+ sdd->state &= ~TXBUSY;
+
+ enable_datapath(sdd, spi, xfer, use_dma);
+
+ /* Start the signals */
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ writel(0, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ else
+ writel(readl(sdd->regs + S3C64XX_SPI_SLAVE_SEL)
+ | S3C64XX_SPI_SLAVE_AUTO | S3C64XX_SPI_SLAVE_NSC_CNT_2,
+ sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+
+ spin_unlock_irqrestore(&sdd->lock, flags);
+
+ if (use_dma)
+ status = wait_for_dma(sdd, xfer);
+ else
+ status = wait_for_pio(sdd, xfer);
+
+ if (status) {
+ dev_err(&spi->dev, "I/O Error: rx-%d tx-%d res:rx-%c tx-%c len-%d\n",
+ xfer->rx_buf ? 1 : 0, xfer->tx_buf ? 1 : 0,
+ (sdd->state & RXBUSY) ? 'f' : 'p',
+ (sdd->state & TXBUSY) ? 'f' : 'p',
+ xfer->len);
+
+ if (use_dma) {
+ if (xfer->tx_buf != NULL
+ && (sdd->state & TXBUSY))
+ dmaengine_terminate_all(sdd->tx_dma.ch);
+ if (xfer->rx_buf != NULL
+ && (sdd->state & RXBUSY))
+ dmaengine_terminate_all(sdd->rx_dma.ch);
+ }
+ } else {
+ flush_fifo(sdd);
+ }
+
+ return status;
+}
+
+static struct s3c64xx_spi_csinfo *s3c64xx_get_slave_ctrldata(
+ struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs;
+ struct device_node *slave_np, *data_np = NULL;
+ u32 fb_delay = 0;
+
+ slave_np = spi->dev.of_node;
+ if (!slave_np) {
+ dev_err(&spi->dev, "device node not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ data_np = of_get_child_by_name(slave_np, "controller-data");
+ if (!data_np) {
+ dev_err(&spi->dev, "child node 'controller-data' not found\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ cs = kzalloc(sizeof(*cs), GFP_KERNEL);
+ if (!cs) {
+ of_node_put(data_np);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ of_property_read_u32(data_np, "samsung,spi-feedback-delay", &fb_delay);
+ cs->fb_delay = fb_delay;
+ of_node_put(data_np);
+ return cs;
+}
+
+/*
+ * Here we only check the validity of requested configuration
+ * and save the configuration in a local data-structure.
+ * The controller is actually configured only just before we
+ * get a message to transfer.
+ */
+static int s3c64xx_spi_setup(struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs = spi->controller_data;
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_info *sci;
+ int err;
+
+ sdd = spi_master_get_devdata(spi->master);
+ if (spi->dev.of_node) {
+ cs = s3c64xx_get_slave_ctrldata(spi);
+ spi->controller_data = cs;
+ } else if (cs) {
+		/* On non-DT platforms the SPI core will set spi->cs_gpio
+		 * to -ENOENT. The GPIO pin used to drive the chip select
+		 * is defined via platform data, so the spi->cs_gpio value
+		 * has to be overridden with the proper GPIO pin number.
+		 */
+ spi->cs_gpio = cs->line;
+ }
+
+ if (IS_ERR_OR_NULL(cs)) {
+ dev_err(&spi->dev, "No CS for SPI(%d)\n", spi->chip_select);
+ return -ENODEV;
+ }
+
+ if (!spi_get_ctldata(spi)) {
+ if (gpio_is_valid(spi->cs_gpio)) {
+ err = gpio_request_one(spi->cs_gpio, GPIOF_OUT_INIT_HIGH,
+ dev_name(&spi->dev));
+ if (err) {
+ dev_err(&spi->dev,
+ "Failed to get /CS gpio [%d]: %d\n",
+ spi->cs_gpio, err);
+ goto err_gpio_req;
+ }
+ }
+
+ spi_set_ctldata(spi, cs);
+ }
+
+ sci = sdd->cntrlr_info;
+
+ pm_runtime_get_sync(&sdd->pdev->dev);
+
+ /* Check if we can provide the requested rate */
+ if (!sdd->port_conf->clk_from_cmu) {
+ u32 psr, speed;
+
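+		/* rate = src_clk / 2 / (PSR + 1), PSR capped by the PSR mask */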
+ /* Max possible */
+ speed = clk_get_rate(sdd->src_clk) / 2 / (0 + 1);
+
+ if (spi->max_speed_hz > speed)
+ spi->max_speed_hz = speed;
+
+ psr = clk_get_rate(sdd->src_clk) / 2 / spi->max_speed_hz - 1;
+ psr &= S3C64XX_SPI_PSR_MASK;
+ if (psr == S3C64XX_SPI_PSR_MASK)
+ psr--;
+
+ speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
+ if (spi->max_speed_hz < speed) {
+ if (psr+1 < S3C64XX_SPI_PSR_MASK) {
+ psr++;
+ } else {
+ err = -EINVAL;
+ goto setup_exit;
+ }
+ }
+
+ speed = clk_get_rate(sdd->src_clk) / 2 / (psr + 1);
+ if (spi->max_speed_hz >= speed) {
+ spi->max_speed_hz = speed;
+ } else {
+ dev_err(&spi->dev, "Can't set %dHz transfer speed\n",
+ spi->max_speed_hz);
+ err = -EINVAL;
+ goto setup_exit;
+ }
+ }
+
+ pm_runtime_put(&sdd->pdev->dev);
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+ return 0;
+
+setup_exit:
+ pm_runtime_put(&sdd->pdev->dev);
+ /* setup() returns with device de-selected */
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+
+ if (gpio_is_valid(spi->cs_gpio))
+ gpio_free(spi->cs_gpio);
+ spi_set_ctldata(spi, NULL);
+
+err_gpio_req:
+ if (spi->dev.of_node)
+ kfree(cs);
+
+ return err;
+}
+
+static void s3c64xx_spi_cleanup(struct spi_device *spi)
+{
+ struct s3c64xx_spi_csinfo *cs = spi_get_ctldata(spi);
+
+ if (gpio_is_valid(spi->cs_gpio)) {
+ gpio_free(spi->cs_gpio);
+ if (spi->dev.of_node)
+ kfree(cs);
+ else {
+ /* On non-DT platforms, the SPI core sets
+ * spi->cs_gpio to -ENOENT and .setup()
+ * overrides it with the GPIO pin value
+ * passed using platform data.
+ */
+ spi->cs_gpio = -ENOENT;
+ }
+ }
+
+ spi_set_ctldata(spi, NULL);
+}
+
+static irqreturn_t s3c64xx_spi_irq(int irq, void *data)
+{
+ struct s3c64xx_spi_driver_data *sdd = data;
+ struct spi_master *spi = sdd->master;
+ unsigned int val, clr = 0;
+
+ val = readl(sdd->regs + S3C64XX_SPI_STATUS);
+
+ if (val & S3C64XX_SPI_ST_RX_OVERRUN_ERR) {
+ clr = S3C64XX_SPI_PND_RX_OVERRUN_CLR;
+ dev_err(&spi->dev, "RX overrun\n");
+ }
+ if (val & S3C64XX_SPI_ST_RX_UNDERRUN_ERR) {
+ clr |= S3C64XX_SPI_PND_RX_UNDERRUN_CLR;
+ dev_err(&spi->dev, "RX underrun\n");
+ }
+ if (val & S3C64XX_SPI_ST_TX_OVERRUN_ERR) {
+ clr |= S3C64XX_SPI_PND_TX_OVERRUN_CLR;
+ dev_err(&spi->dev, "TX overrun\n");
+ }
+ if (val & S3C64XX_SPI_ST_TX_UNDERRUN_ERR) {
+ clr |= S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
+ dev_err(&spi->dev, "TX underrun\n");
+ }
+
+ /* Clear the pending irq by setting and then clearing it */
+ writel(clr, sdd->regs + S3C64XX_SPI_PENDING_CLR);
+ writel(0, sdd->regs + S3C64XX_SPI_PENDING_CLR);
+
+ return IRQ_HANDLED;
+}
+
+static void s3c64xx_spi_hwinit(struct s3c64xx_spi_driver_data *sdd, int channel)
+{
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
+ void __iomem *regs = sdd->regs;
+ unsigned int val;
+
+ sdd->cur_speed = 0;
+
+ if (!(sdd->port_conf->quirks & S3C64XX_SPI_QUIRK_CS_AUTO))
+ writel(S3C64XX_SPI_SLAVE_SIG_INACT, sdd->regs + S3C64XX_SPI_SLAVE_SEL);
+
+ /* Disable Interrupts - we use Polling if not DMA mode */
+ writel(0, regs + S3C64XX_SPI_INT_EN);
+
+ if (!sdd->port_conf->clk_from_cmu)
+ writel(sci->src_clk_nr << S3C64XX_SPI_CLKSEL_SRCSHFT,
+ regs + S3C64XX_SPI_CLK_CFG);
+ writel(0, regs + S3C64XX_SPI_MODE_CFG);
+ writel(0, regs + S3C64XX_SPI_PACKET_CNT);
+
+	/* Clear any pending irq bits; they must be set and then cleared */
+ val = S3C64XX_SPI_PND_RX_OVERRUN_CLR |
+ S3C64XX_SPI_PND_RX_UNDERRUN_CLR |
+ S3C64XX_SPI_PND_TX_OVERRUN_CLR |
+ S3C64XX_SPI_PND_TX_UNDERRUN_CLR;
+ writel(val, regs + S3C64XX_SPI_PENDING_CLR);
+ writel(0, regs + S3C64XX_SPI_PENDING_CLR);
+
+ writel(0, regs + S3C64XX_SPI_SWAP_CFG);
+
+ val = readl(regs + S3C64XX_SPI_MODE_CFG);
+ val &= ~S3C64XX_SPI_MODE_4BURST;
+ val &= ~(S3C64XX_SPI_MAX_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
+ val |= (S3C64XX_SPI_TRAILCNT << S3C64XX_SPI_TRAILCNT_OFF);
+ writel(val, regs + S3C64XX_SPI_MODE_CFG);
+
+ flush_fifo(sdd);
+}
+
+#ifdef CONFIG_OF
+static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
+{
+ struct s3c64xx_spi_info *sci;
+ u32 temp;
+
+ sci = devm_kzalloc(dev, sizeof(*sci), GFP_KERNEL);
+ if (!sci)
+ return ERR_PTR(-ENOMEM);
+
+ if (of_property_read_u32(dev->of_node, "samsung,spi-src-clk", &temp)) {
+ dev_warn(dev, "spi bus clock parent not specified, using clock at index 0 as parent\n");
+ sci->src_clk_nr = 0;
+ } else {
+ sci->src_clk_nr = temp;
+ }
+
+ if (of_property_read_u32(dev->of_node, "num-cs", &temp)) {
+ dev_warn(dev, "number of chip select lines not specified, assuming 1 chip select line\n");
+ sci->num_cs = 1;
+ } else {
+ sci->num_cs = temp;
+ }
+
+ return sci;
+}
+#else
+static struct s3c64xx_spi_info *s3c64xx_spi_parse_dt(struct device *dev)
+{
+ return dev_get_platdata(dev);
+}
+#endif
+
+static const struct of_device_id s3c64xx_spi_dt_match[];
+
+static inline struct s3c64xx_spi_port_config *s3c64xx_spi_get_port_config(
+ struct platform_device *pdev)
+{
+#ifdef CONFIG_OF
+ if (pdev->dev.of_node) {
+ const struct of_device_id *match;
+ match = of_match_node(s3c64xx_spi_dt_match, pdev->dev.of_node);
+ return (struct s3c64xx_spi_port_config *)match->data;
+ }
+#endif
+ return (struct s3c64xx_spi_port_config *)
+ platform_get_device_id(pdev)->driver_data;
+}
+
+static int s3c64xx_spi_probe(struct platform_device *pdev)
+{
+ struct resource *mem_res;
+ struct resource *res;
+ struct s3c64xx_spi_driver_data *sdd;
+ struct s3c64xx_spi_info *sci = dev_get_platdata(&pdev->dev);
+ struct spi_master *master;
+ int ret, irq;
+ char clk_name[16];
+
+ if (!sci && pdev->dev.of_node) {
+ sci = s3c64xx_spi_parse_dt(&pdev->dev);
+ if (IS_ERR(sci))
+ return PTR_ERR(sci);
+ }
+
+ if (!sci) {
+ dev_err(&pdev->dev, "platform_data missing!\n");
+ return -ENODEV;
+ }
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (mem_res == NULL) {
+ dev_err(&pdev->dev, "Unable to get SPI MEM resource\n");
+ return -ENXIO;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", irq);
+ return irq;
+ }
+
+ master = spi_alloc_master(&pdev->dev,
+ sizeof(struct s3c64xx_spi_driver_data));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+
+ sdd = spi_master_get_devdata(master);
+ sdd->port_conf = s3c64xx_spi_get_port_config(pdev);
+ sdd->master = master;
+ sdd->cntrlr_info = sci;
+ sdd->pdev = pdev;
+ sdd->sfr_start = mem_res->start;
+ if (pdev->dev.of_node) {
+ ret = of_alias_get_id(pdev->dev.of_node, "spi");
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to get alias id, errno %d\n",
+ ret);
+ goto err0;
+ }
+ sdd->port_id = ret;
+ } else {
+ sdd->port_id = pdev->id;
+ }
+
+ sdd->cur_bpw = 8;
+
+ if (!sdd->pdev->dev.of_node) {
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 0);
+ if (!res) {
+ dev_warn(&pdev->dev, "Unable to get SPI tx dma resource. Switching to poll mode\n");
+ sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
+ } else
+ sdd->tx_dma.dmach = res->start;
+
+ res = platform_get_resource(pdev, IORESOURCE_DMA, 1);
+ if (!res) {
+ dev_warn(&pdev->dev, "Unable to get SPI rx dma resource. Switching to poll mode\n");
+ sdd->port_conf->quirks = S3C64XX_SPI_QUIRK_POLL;
+ } else
+ sdd->rx_dma.dmach = res->start;
+ }
+
+ sdd->tx_dma.direction = DMA_MEM_TO_DEV;
+ sdd->rx_dma.direction = DMA_DEV_TO_MEM;
+
+ master->dev.of_node = pdev->dev.of_node;
+ master->bus_num = sdd->port_id;
+ master->setup = s3c64xx_spi_setup;
+ master->cleanup = s3c64xx_spi_cleanup;
+ master->prepare_transfer_hardware = s3c64xx_spi_prepare_transfer;
+ master->prepare_message = s3c64xx_spi_prepare_message;
+ master->transfer_one = s3c64xx_spi_transfer_one;
+ master->unprepare_transfer_hardware = s3c64xx_spi_unprepare_transfer;
+ master->num_chipselect = sci->num_cs;
+ master->dma_alignment = 8;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->auto_runtime_pm = true;
+ if (!is_polling(sdd))
+ master->can_dma = s3c64xx_spi_can_dma;
+
+ sdd->regs = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(sdd->regs)) {
+ ret = PTR_ERR(sdd->regs);
+ goto err0;
+ }
+
+ if (sci->cfg_gpio && sci->cfg_gpio()) {
+ dev_err(&pdev->dev, "Unable to config gpio\n");
+ ret = -EBUSY;
+ goto err0;
+ }
+
+ /* Setup clocks */
+ sdd->clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(sdd->clk)) {
+ dev_err(&pdev->dev, "Unable to acquire clock 'spi'\n");
+ ret = PTR_ERR(sdd->clk);
+ goto err0;
+ }
+
+ if (clk_prepare_enable(sdd->clk)) {
+ dev_err(&pdev->dev, "Couldn't enable clock 'spi'\n");
+ ret = -EBUSY;
+ goto err0;
+ }
+
+ sprintf(clk_name, "spi_busclk%d", sci->src_clk_nr);
+ sdd->src_clk = devm_clk_get(&pdev->dev, clk_name);
+ if (IS_ERR(sdd->src_clk)) {
+ dev_err(&pdev->dev,
+ "Unable to acquire clock '%s'\n", clk_name);
+ ret = PTR_ERR(sdd->src_clk);
+ goto err2;
+ }
+
+ if (clk_prepare_enable(sdd->src_clk)) {
+ dev_err(&pdev->dev, "Couldn't enable clock '%s'\n", clk_name);
+ ret = -EBUSY;
+ goto err2;
+ }
+
+	/* Setup Default Mode */
+ s3c64xx_spi_hwinit(sdd, sdd->port_id);
+
+ spin_lock_init(&sdd->lock);
+ init_completion(&sdd->xfer_completion);
+
+ ret = devm_request_irq(&pdev->dev, irq, s3c64xx_spi_irq, 0,
+ "spi-s3c64xx", sdd);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "Failed to request IRQ %d: %d\n",
+ irq, ret);
+ goto err3;
+ }
+
+ writel(S3C64XX_SPI_INT_RX_OVERRUN_EN | S3C64XX_SPI_INT_RX_UNDERRUN_EN |
+ S3C64XX_SPI_INT_TX_OVERRUN_EN | S3C64XX_SPI_INT_TX_UNDERRUN_EN,
+ sdd->regs + S3C64XX_SPI_INT_EN);
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret != 0) {
+ dev_err(&pdev->dev, "cannot register SPI master: %d\n", ret);
+ goto err3;
+ }
+
+ dev_dbg(&pdev->dev, "Samsung SoC SPI Driver loaded for Bus SPI-%d with %d Slaves attached\n",
+ sdd->port_id, master->num_chipselect);
+ dev_dbg(&pdev->dev, "\tIOmem=[%pR]\tDMA=[Rx-%d, Tx-%d]\n",
+ mem_res,
+ sdd->rx_dma.dmach, sdd->tx_dma.dmach);
+
+ return 0;
+
+err3:
+ clk_disable_unprepare(sdd->src_clk);
+err2:
+ clk_disable_unprepare(sdd->clk);
+err0:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int s3c64xx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = spi_master_get(platform_get_drvdata(pdev));
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+ pm_runtime_disable(&pdev->dev);
+
+ writel(0, sdd->regs + S3C64XX_SPI_INT_EN);
+
+ clk_disable_unprepare(sdd->src_clk);
+
+ clk_disable_unprepare(sdd->clk);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int s3c64xx_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+ int ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ if (!pm_runtime_suspended(dev)) {
+ clk_disable_unprepare(sdd->clk);
+ clk_disable_unprepare(sdd->src_clk);
+ }
+
+ sdd->cur_speed = 0; /* Output Clock is stopped */
+
+ return 0;
+}
+
+static int s3c64xx_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ struct s3c64xx_spi_info *sci = sdd->cntrlr_info;
+
+ if (sci->cfg_gpio)
+ sci->cfg_gpio();
+
+ if (!pm_runtime_suspended(dev)) {
+ clk_prepare_enable(sdd->src_clk);
+ clk_prepare_enable(sdd->clk);
+ }
+
+ s3c64xx_spi_hwinit(sdd, sdd->port_id);
+
+ return spi_master_resume(master);
+}
+#endif /* CONFIG_PM_SLEEP */
+
+#ifdef CONFIG_PM
+static int s3c64xx_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(sdd->clk);
+ clk_disable_unprepare(sdd->src_clk);
+
+ return 0;
+}
+
+static int s3c64xx_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct s3c64xx_spi_driver_data *sdd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sdd->src_clk);
+ if (ret != 0)
+ return ret;
+
+ ret = clk_prepare_enable(sdd->clk);
+ if (ret != 0) {
+ clk_disable_unprepare(sdd->src_clk);
+ return ret;
+ }
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops s3c64xx_spi_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(s3c64xx_spi_suspend, s3c64xx_spi_resume)
+ SET_RUNTIME_PM_OPS(s3c64xx_spi_runtime_suspend,
+ s3c64xx_spi_runtime_resume, NULL)
+};
+
+static struct s3c64xx_spi_port_config s3c2443_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f },
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+ .high_speed = true,
+};
+
+static struct s3c64xx_spi_port_config s3c6410_spi_port_config = {
+ .fifo_lvl_mask = { 0x7f, 0x7F },
+ .rx_lvl_offset = 13,
+ .tx_st_done = 21,
+};
+
+static struct s3c64xx_spi_port_config s5pv210_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+};
+
+static struct s3c64xx_spi_port_config exynos4_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+ .clk_from_cmu = true,
+};
+
+static struct s3c64xx_spi_port_config exynos5440_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff },
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .quirks = S3C64XX_SPI_QUIRK_POLL,
+};
+
+static struct s3c64xx_spi_port_config exynos7_spi_port_config = {
+ .fifo_lvl_mask = { 0x1ff, 0x7F, 0x7F, 0x7F, 0x7F, 0x1ff},
+ .rx_lvl_offset = 15,
+ .tx_st_done = 25,
+ .high_speed = true,
+ .clk_from_cmu = true,
+ .quirks = S3C64XX_SPI_QUIRK_CS_AUTO,
+};
+
+static struct platform_device_id s3c64xx_spi_driver_ids[] = {
+ {
+ .name = "s3c2443-spi",
+ .driver_data = (kernel_ulong_t)&s3c2443_spi_port_config,
+ }, {
+ .name = "s3c6410-spi",
+ .driver_data = (kernel_ulong_t)&s3c6410_spi_port_config,
+ }, {
+ .name = "s5pv210-spi",
+ .driver_data = (kernel_ulong_t)&s5pv210_spi_port_config,
+ }, {
+ .name = "exynos4210-spi",
+ .driver_data = (kernel_ulong_t)&exynos4_spi_port_config,
+ },
+ { },
+};
+
+static const struct of_device_id s3c64xx_spi_dt_match[] = {
+ { .compatible = "samsung,s3c2443-spi",
+ .data = (void *)&s3c2443_spi_port_config,
+ },
+ { .compatible = "samsung,s3c6410-spi",
+ .data = (void *)&s3c6410_spi_port_config,
+ },
+ { .compatible = "samsung,s5pv210-spi",
+ .data = (void *)&s5pv210_spi_port_config,
+ },
+ { .compatible = "samsung,exynos4210-spi",
+ .data = (void *)&exynos4_spi_port_config,
+ },
+ { .compatible = "samsung,exynos5440-spi",
+ .data = (void *)&exynos5440_spi_port_config,
+ },
+ { .compatible = "samsung,exynos7-spi",
+ .data = (void *)&exynos7_spi_port_config,
+ },
+ { },
+};
+MODULE_DEVICE_TABLE(of, s3c64xx_spi_dt_match);
+
+static struct platform_driver s3c64xx_spi_driver = {
+ .driver = {
+ .name = "s3c64xx-spi",
+ .pm = &s3c64xx_spi_pm,
+ .of_match_table = of_match_ptr(s3c64xx_spi_dt_match),
+ },
+ .probe = s3c64xx_spi_probe,
+ .remove = s3c64xx_spi_remove,
+ .id_table = s3c64xx_spi_driver_ids,
+};
+MODULE_ALIAS("platform:s3c64xx-spi");
+
+module_platform_driver(s3c64xx_spi_driver);
+
+MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>");
+MODULE_DESCRIPTION("S3C64XX SPI Controller Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sc18is602.c b/drivers/spi/spi-sc18is602.c
new file mode 100644
index 000000000..36af4d48a
--- /dev/null
+++ b/drivers/spi/spi-sc18is602.c
@@ -0,0 +1,329 @@
+/*
+ * NXP SC18IS602/603 SPI driver
+ *
+ * Copyright (C) Guenter Roeck <linux@roeck-us.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/i2c.h>
+#include <linux/delay.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/platform_data/sc18is602.h>
+
+enum chips { sc18is602, sc18is602b, sc18is603 };
+
+#define SC18IS602_BUFSIZ 200
+#define SC18IS602_CLOCK 7372000
+
+#define SC18IS602_MODE_CPHA BIT(2)
+#define SC18IS602_MODE_CPOL BIT(3)
+#define SC18IS602_MODE_LSB_FIRST BIT(5)
+#define SC18IS602_MODE_CLOCK_DIV_4 0x0
+#define SC18IS602_MODE_CLOCK_DIV_16 0x1
+#define SC18IS602_MODE_CLOCK_DIV_64 0x2
+#define SC18IS602_MODE_CLOCK_DIV_128 0x3
+
+struct sc18is602 {
+ struct spi_master *master;
+ struct device *dev;
+ u8 ctrl;
+ u32 freq;
+ u32 speed;
+
+ /* I2C data */
+ struct i2c_client *client;
+ enum chips id;
+ u8 buffer[SC18IS602_BUFSIZ + 1];
+ int tlen; /* Data queued for tx in buffer */
+ int rindex; /* Receive data index in buffer */
+};
+
+static int sc18is602_wait_ready(struct sc18is602 *hw, int len)
+{
+ int i, err;
+ int usecs = 1000000 * len / hw->speed + 1;
+ u8 dummy[1];
+
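+	/*
+	 * A failed one-byte read means the previous SPI transfer is still
+	 * in progress; retry until the chip responds or give up.
+	 */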
+ for (i = 0; i < 10; i++) {
+ err = i2c_master_recv(hw->client, dummy, 1);
+ if (err >= 0)
+ return 0;
+ usleep_range(usecs, usecs * 2);
+ }
+ return -ETIMEDOUT;
+}
+
+static int sc18is602_txrx(struct sc18is602 *hw, struct spi_message *msg,
+ struct spi_transfer *t, bool do_transfer)
+{
+ unsigned int len = t->len;
+ int ret;
+
+ if (hw->tlen == 0) {
+ /* First byte (I2C command) is chip select */
+ hw->buffer[0] = 1 << msg->spi->chip_select;
+ hw->tlen = 1;
+ hw->rindex = 0;
+ }
+	/*
+	 * We cannot immediately send data to the chip, since each I2C message
+	 * corresponds to a full SPI message (from CS active to CS inactive).
+	 * Enqueue transfers up to the first read or until do_transfer is true.
+	 */
+ if (t->tx_buf) {
+ memcpy(&hw->buffer[hw->tlen], t->tx_buf, len);
+ hw->tlen += len;
+ if (t->rx_buf)
+ do_transfer = true;
+ else
+ hw->rindex = hw->tlen - 1;
+ } else if (t->rx_buf) {
+ /*
+ * For receive-only transfers we still need to perform a dummy
+ * write to receive data from the SPI chip.
+ * Read data starts at the end of transmit data (minus 1 to
+ * account for CS).
+ */
+ hw->rindex = hw->tlen - 1;
+ memset(&hw->buffer[hw->tlen], 0, len);
+ hw->tlen += len;
+ do_transfer = true;
+ }
+
+ if (do_transfer && hw->tlen > 1) {
+ ret = sc18is602_wait_ready(hw, SC18IS602_BUFSIZ);
+ if (ret < 0)
+ return ret;
+ ret = i2c_master_send(hw->client, hw->buffer, hw->tlen);
+ if (ret < 0)
+ return ret;
+ if (ret != hw->tlen)
+ return -EIO;
+
+ if (t->rx_buf) {
+ int rlen = hw->rindex + len;
+
+ ret = sc18is602_wait_ready(hw, hw->tlen);
+ if (ret < 0)
+ return ret;
+ ret = i2c_master_recv(hw->client, hw->buffer, rlen);
+ if (ret < 0)
+ return ret;
+ if (ret != rlen)
+ return -EIO;
+ memcpy(t->rx_buf, &hw->buffer[hw->rindex], len);
+ }
+ hw->tlen = 0;
+ }
+ return len;
+}
+
+static int sc18is602_setup_transfer(struct sc18is602 *hw, u32 hz, u8 mode)
+{
+ u8 ctrl = 0;
+ int ret;
+
+ if (mode & SPI_CPHA)
+ ctrl |= SC18IS602_MODE_CPHA;
+ if (mode & SPI_CPOL)
+ ctrl |= SC18IS602_MODE_CPOL;
+ if (mode & SPI_LSB_FIRST)
+ ctrl |= SC18IS602_MODE_LSB_FIRST;
+
+ /* Find the closest clock speed */
+ if (hz >= hw->freq / 4) {
+ ctrl |= SC18IS602_MODE_CLOCK_DIV_4;
+ hw->speed = hw->freq / 4;
+ } else if (hz >= hw->freq / 16) {
+ ctrl |= SC18IS602_MODE_CLOCK_DIV_16;
+ hw->speed = hw->freq / 16;
+ } else if (hz >= hw->freq / 64) {
+ ctrl |= SC18IS602_MODE_CLOCK_DIV_64;
+ hw->speed = hw->freq / 64;
+ } else {
+ ctrl |= SC18IS602_MODE_CLOCK_DIV_128;
+ hw->speed = hw->freq / 128;
+ }
+
+ /*
+ * Don't do anything if the control value did not change. The initial
+ * value of 0xff for hw->ctrl ensures that the correct mode will be set
+ * with the first call to this function.
+ */
+ if (ctrl == hw->ctrl)
+ return 0;
+
+ ret = i2c_smbus_write_byte_data(hw->client, 0xf0, ctrl);
+ if (ret < 0)
+ return ret;
+
+ hw->ctrl = ctrl;
+
+ return 0;
+}
+
+static int sc18is602_check_transfer(struct spi_device *spi,
+ struct spi_transfer *t, int tlen)
+{
+ if (t && t->len + tlen > SC18IS602_BUFSIZ)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sc18is602_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct sc18is602 *hw = spi_master_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t;
+ int status = 0;
+
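+	/*
+	 * Transfers are accumulated into one buffered I2C write; the buffer
+	 * is flushed on a read, on cs_change, or at the message's last
+	 * transfer.
+	 */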
+ hw->tlen = 0;
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ bool do_transfer;
+
+ status = sc18is602_check_transfer(spi, t, hw->tlen);
+ if (status < 0)
+ break;
+
+ status = sc18is602_setup_transfer(hw, t->speed_hz, spi->mode);
+ if (status < 0)
+ break;
+
+ do_transfer = t->cs_change || list_is_last(&t->transfer_list,
+ &m->transfers);
+
+ if (t->len) {
+ status = sc18is602_txrx(hw, m, t, do_transfer);
+ if (status < 0)
+ break;
+ m->actual_length += status;
+ }
+ status = 0;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+ }
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ return status;
+}
+
+static int sc18is602_setup(struct spi_device *spi)
+{
+ struct sc18is602 *hw = spi_master_get_devdata(spi->master);
+
+ /* SC18IS602 does not support CS2 */
+ if (hw->id == sc18is602 && spi->chip_select == 2)
+ return -ENXIO;
+
+ return 0;
+}
+
+static int sc18is602_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct sc18is602_platform_data *pdata = dev_get_platdata(dev);
+ struct sc18is602 *hw;
+ struct spi_master *master;
+ int error;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C |
+ I2C_FUNC_SMBUS_WRITE_BYTE_DATA))
+ return -EINVAL;
+
+ master = spi_alloc_master(dev, sizeof(struct sc18is602));
+ if (!master)
+ return -ENOMEM;
+
+ hw = spi_master_get_devdata(master);
+ i2c_set_clientdata(client, hw);
+
+ hw->master = master;
+ hw->client = client;
+ hw->dev = dev;
+ hw->ctrl = 0xff;
+
+ hw->id = id->driver_data;
+
+ switch (hw->id) {
+ case sc18is602:
+ case sc18is602b:
+ master->num_chipselect = 4;
+ hw->freq = SC18IS602_CLOCK;
+ break;
+ case sc18is603:
+ master->num_chipselect = 2;
+ if (pdata) {
+ hw->freq = pdata->clock_frequency;
+ } else {
+ const __be32 *val;
+ int len;
+
+ val = of_get_property(np, "clock-frequency", &len);
+ if (val && len >= sizeof(__be32))
+ hw->freq = be32_to_cpup(val);
+ }
+ if (!hw->freq)
+ hw->freq = SC18IS602_CLOCK;
+ break;
+ }
+ master->bus_num = np ? -1 : client->adapter->nr;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->setup = sc18is602_setup;
+ master->transfer_one_message = sc18is602_transfer_one;
+ master->dev.of_node = np;
+ master->min_speed_hz = hw->freq / 128;
+ master->max_speed_hz = hw->freq / 4;
+
+ error = devm_spi_register_master(dev, master);
+ if (error)
+ goto error_reg;
+
+ return 0;
+
+error_reg:
+ spi_master_put(master);
+ return error;
+}
+
+static const struct i2c_device_id sc18is602_id[] = {
+ { "sc18is602", sc18is602 },
+ { "sc18is602b", sc18is602b },
+ { "sc18is603", sc18is603 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, sc18is602_id);
+
+static struct i2c_driver sc18is602_driver = {
+ .driver = {
+ .name = "sc18is602",
+ },
+ .probe = sc18is602_probe,
+ .id_table = sc18is602_id,
+};
+
+module_i2c_driver(sc18is602_driver);
+
+MODULE_DESCRIPTION("SC18IS602/603 SPI Master Driver");
+MODULE_AUTHOR("Guenter Roeck");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sh-hspi.c b/drivers/spi/spi-sh-hspi.c
new file mode 100644
index 000000000..20e800e70
--- /dev/null
+++ b/drivers/spi/spi-sh-hspi.c
@@ -0,0 +1,321 @@
+/*
+ * SuperH HSPI bus driver
+ *
+ * Copyright (C) 2011 Kuninori Morimoto
+ *
+ * Based on spi-sh.c:
+ * Based on pxa2xx_spi.c:
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/sh_hspi.h>
+
+#define SPCR 0x00
+#define SPSR 0x04
+#define SPSCR 0x08
+#define SPTBR 0x0C
+#define SPRBR 0x10
+#define SPCR2 0x14
+
+/* SPSR */
+#define RXFL (1 << 2)
+
+struct hspi_priv {
+ void __iomem *addr;
+ struct spi_master *master;
+ struct device *dev;
+ struct clk *clk;
+};
+
+/*
+ * basic register access functions
+ */
+static void hspi_write(struct hspi_priv *hspi, int reg, u32 val)
+{
+ iowrite32(val, hspi->addr + reg);
+}
+
+static u32 hspi_read(struct hspi_priv *hspi, int reg)
+{
+ return ioread32(hspi->addr + reg);
+}
+
+static void hspi_bit_set(struct hspi_priv *hspi, int reg, u32 mask, u32 set)
+{
+ u32 val = hspi_read(hspi, reg);
+
+ val &= ~mask;
+ val |= set & mask;
+
+ hspi_write(hspi, reg, val);
+}
+
+/*
+ * transfer function
+ */
+static int hspi_status_check_timeout(struct hspi_priv *hspi, u32 mask, u32 val)
+{
+ int t = 256;
+
+ while (t--) {
+ if ((mask & hspi_read(hspi, SPSR)) == val)
+ return 0;
+
+ udelay(10);
+ }
+
+ dev_err(hspi->dev, "timeout\n");
+ return -ETIMEDOUT;
+}
+
+/*
+ * spi master functions
+ */
+
+#define hspi_hw_cs_enable(hspi) hspi_hw_cs_ctrl(hspi, 0)
+#define hspi_hw_cs_disable(hspi) hspi_hw_cs_ctrl(hspi, 1)
+static void hspi_hw_cs_ctrl(struct hspi_priv *hspi, int hi)
+{
+ hspi_bit_set(hspi, SPSCR, (1 << 6), (hi) << 6);
+}
+
+static void hspi_hw_setup(struct hspi_priv *hspi,
+ struct spi_message *msg,
+ struct spi_transfer *t)
+{
+ struct spi_device *spi = msg->spi;
+ struct device *dev = hspi->dev;
+ u32 spcr, idiv_clk;
+ u32 rate, best_rate, min, tmp;
+
+ /*
+ * find best IDIV/CLKCx settings
+ */
+ min = ~0;
+ best_rate = 0;
+ spcr = 0;
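+	/* rate = clk / IDIV (16 or 128) / ((CLKCx + 1) * 2); pick the closest match */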
+ for (idiv_clk = 0x00; idiv_clk <= 0x3F; idiv_clk++) {
+ rate = clk_get_rate(hspi->clk);
+
+ /* IDIV calculation */
+ if (idiv_clk & (1 << 5))
+ rate /= 128;
+ else
+ rate /= 16;
+
+ /* CLKCx calculation */
+ rate /= (((idiv_clk & 0x1F) + 1) * 2);
+
+ /* save best settings */
+ tmp = abs(t->speed_hz - rate);
+ if (tmp < min) {
+ min = tmp;
+ spcr = idiv_clk;
+ best_rate = rate;
+ }
+ }
+
+ if (spi->mode & SPI_CPHA)
+ spcr |= 1 << 7;
+ if (spi->mode & SPI_CPOL)
+ spcr |= 1 << 6;
+
+ dev_dbg(dev, "speed %d/%d\n", t->speed_hz, best_rate);
+
+ hspi_write(hspi, SPCR, spcr);
+ hspi_write(hspi, SPSR, 0x0);
+ hspi_write(hspi, SPSCR, 0x21); /* master mode / CS control */
+}
+
+static int hspi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct hspi_priv *hspi = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ u32 tx;
+ u32 rx;
+ int ret, i;
+ unsigned int cs_change;
+ const int nsecs = 50;
+
+ dev_dbg(hspi->dev, "%s\n", __func__);
+
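+	/* PIO only: each byte is written out and read back individually */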
+ cs_change = 1;
+ ret = 0;
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+
+ if (cs_change) {
+ hspi_hw_setup(hspi, msg, t);
+ hspi_hw_cs_enable(hspi);
+ ndelay(nsecs);
+ }
+ cs_change = t->cs_change;
+
+ for (i = 0; i < t->len; i++) {
+
+			/* wait for any remaining data to drain */
+ ret = hspi_status_check_timeout(hspi, 0x1, 0);
+ if (ret < 0)
+ break;
+
+ tx = 0;
+ if (t->tx_buf)
+ tx = (u32)((u8 *)t->tx_buf)[i];
+
+ hspi_write(hspi, SPTBR, tx);
+
+			/* wait for receive data */
+ ret = hspi_status_check_timeout(hspi, 0x4, 0x4);
+ if (ret < 0)
+ break;
+
+ rx = hspi_read(hspi, SPRBR);
+ if (t->rx_buf)
+ ((u8 *)t->rx_buf)[i] = (u8)rx;
+
+ }
+
+ msg->actual_length += t->len;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (cs_change) {
+ ndelay(nsecs);
+ hspi_hw_cs_disable(hspi);
+ ndelay(nsecs);
+ }
+ }
+
+ msg->status = ret;
+ if (!cs_change) {
+ ndelay(nsecs);
+ hspi_hw_cs_disable(hspi);
+ }
+ spi_finalize_current_message(master);
+
+ return ret;
+}
+
+static int hspi_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct spi_master *master;
+ struct hspi_priv *hspi;
+ struct clk *clk;
+ int ret;
+
+ /* get base addr */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*hspi));
+ if (!master) {
+ dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ return -ENOMEM;
+ }
+
+ clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(clk)) {
+ dev_err(&pdev->dev, "couldn't get clock\n");
+ ret = -EINVAL;
+ goto error0;
+ }
+
+ hspi = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, hspi);
+
+ /* init hspi */
+ hspi->master = master;
+ hspi->dev = &pdev->dev;
+ hspi->clk = clk;
+ hspi->addr = devm_ioremap(hspi->dev,
+ res->start, resource_size(res));
+ if (!hspi->addr) {
+ dev_err(&pdev->dev, "ioremap error.\n");
+ ret = -ENOMEM;
+ goto error1;
+ }
+
+ pm_runtime_enable(&pdev->dev);
+
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+ master->transfer_one_message = hspi_transfer_one_message;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master error.\n");
+ goto error2;
+ }
+
+ return 0;
+
+ error2:
+ pm_runtime_disable(&pdev->dev);
+ error1:
+ clk_put(clk);
+ error0:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int hspi_remove(struct platform_device *pdev)
+{
+ struct hspi_priv *hspi = platform_get_drvdata(pdev);
+
+ pm_runtime_disable(&pdev->dev);
+
+ clk_put(hspi->clk);
+
+ return 0;
+}
+
+static const struct of_device_id hspi_of_match[] = {
+ { .compatible = "renesas,hspi", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, hspi_of_match);
+
+static struct platform_driver hspi_driver = {
+ .probe = hspi_probe,
+ .remove = hspi_remove,
+ .driver = {
+ .name = "sh-hspi",
+ .of_match_table = hspi_of_match,
+ },
+};
+module_platform_driver(hspi_driver);
+
+MODULE_DESCRIPTION("SuperH HSPI bus driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>");
+MODULE_ALIAS("platform:sh-hspi");
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
new file mode 100644
index 000000000..bcc7c635d
--- /dev/null
+++ b/drivers/spi/spi-sh-msiof.c
@@ -0,0 +1,1291 @@
+/*
+ * SuperH MSIOF SPI Master Interface
+ *
+ * Copyright (c) 2009 Magnus Damm
+ * Copyright (C) 2014 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bitmap.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/sh_dma.h>
+
+#include <linux/spi/sh_msiof.h>
+#include <linux/spi/spi.h>
+
+#include <asm/unaligned.h>
+
+
+struct sh_msiof_chipdata {
+ u16 tx_fifo_size;
+ u16 rx_fifo_size;
+ u16 master_flags;
+};
+
+struct sh_msiof_spi_priv {
+ struct spi_master *master;
+ void __iomem *mapbase;
+ struct clk *clk;
+ struct platform_device *pdev;
+ const struct sh_msiof_chipdata *chipdata;
+ struct sh_msiof_spi_info *info;
+ struct completion done;
+ int tx_fifo_size;
+ int rx_fifo_size;
+ void *tx_dma_page;
+ void *rx_dma_page;
+ dma_addr_t tx_dma_addr;
+ dma_addr_t rx_dma_addr;
+};
+
+#define TMDR1 0x00 /* Transmit Mode Register 1 */
+#define TMDR2 0x04 /* Transmit Mode Register 2 */
+#define TMDR3 0x08 /* Transmit Mode Register 3 */
+#define RMDR1 0x10 /* Receive Mode Register 1 */
+#define RMDR2 0x14 /* Receive Mode Register 2 */
+#define RMDR3 0x18 /* Receive Mode Register 3 */
+#define TSCR 0x20 /* Transmit Clock Select Register */
+#define RSCR 0x22 /* Receive Clock Select Register (SH, A1, APE6) */
+#define CTR 0x28 /* Control Register */
+#define FCTR 0x30 /* FIFO Control Register */
+#define STR 0x40 /* Status Register */
+#define IER 0x44 /* Interrupt Enable Register */
+#define TDR1 0x48 /* Transmit Control Data Register 1 (SH, A1) */
+#define TDR2 0x4c /* Transmit Control Data Register 2 (SH, A1) */
+#define TFDR 0x50 /* Transmit FIFO Data Register */
+#define RDR1 0x58 /* Receive Control Data Register 1 (SH, A1) */
+#define RDR2 0x5c /* Receive Control Data Register 2 (SH, A1) */
+#define RFDR 0x60 /* Receive FIFO Data Register */
+
+/* TMDR1 and RMDR1 */
+#define MDR1_TRMD 0x80000000 /* Transfer Mode (1 = Master mode) */
+#define MDR1_SYNCMD_MASK 0x30000000 /* SYNC Mode */
+#define MDR1_SYNCMD_SPI 0x20000000 /* Level mode/SPI */
+#define MDR1_SYNCMD_LR 0x30000000 /* L/R mode */
+#define MDR1_SYNCAC_SHIFT 25 /* Sync Polarity (1 = Active-low) */
+#define MDR1_BITLSB_SHIFT 24 /* MSB/LSB First (1 = LSB first) */
+#define MDR1_DTDL_SHIFT 20 /* Data Pin Bit Delay for MSIOF_SYNC */
+#define MDR1_SYNCDL_SHIFT 16 /* Frame Sync Signal Timing Delay */
+#define MDR1_FLD_MASK 0x0000000c /* Frame Sync Signal Interval (0-3) */
+#define MDR1_FLD_SHIFT 2
+#define MDR1_XXSTP 0x00000001 /* Transmission/Reception Stop on FIFO */
+/* TMDR1 */
+#define TMDR1_PCON 0x40000000 /* Transfer Signal Connection */
+
+/* TMDR2 and RMDR2 */
+#define MDR2_BITLEN1(i) (((i) - 1) << 24) /* Data Size (8-32 bits) */
+#define MDR2_WDLEN1(i) (((i) - 1) << 16) /* Word Count (1-64/256 (SH, A1))) */
+#define MDR2_GRPMASK1 0x00000001 /* Group Output Mask 1 (SH, A1) */
+
+#define MAX_WDLEN 256U
+
+/* TSCR and RSCR */
+#define SCR_BRPS_MASK 0x1f00 /* Prescaler Setting (1-32) */
+#define SCR_BRPS(i) (((i) - 1) << 8)
+#define SCR_BRDV_MASK 0x0007 /* Baud Rate Generator's Division Ratio */
+#define SCR_BRDV_DIV_2 0x0000
+#define SCR_BRDV_DIV_4 0x0001
+#define SCR_BRDV_DIV_8 0x0002
+#define SCR_BRDV_DIV_16 0x0003
+#define SCR_BRDV_DIV_32 0x0004
+#define SCR_BRDV_DIV_1 0x0007
+
+/* CTR */
+#define CTR_TSCKIZ_MASK 0xc0000000 /* Transmit Clock I/O Polarity Select */
+#define CTR_TSCKIZ_SCK 0x80000000 /* Disable SCK when TX disabled */
+#define CTR_TSCKIZ_POL_SHIFT 30 /* Transmit Clock Polarity */
+#define CTR_RSCKIZ_MASK 0x30000000 /* Receive Clock Polarity Select */
+#define CTR_RSCKIZ_SCK 0x20000000 /* Must match CTR_TSCKIZ_SCK */
+#define CTR_RSCKIZ_POL_SHIFT 28 /* Receive Clock Polarity */
+#define CTR_TEDG_SHIFT 27 /* Transmit Timing (1 = falling edge) */
+#define CTR_REDG_SHIFT 26 /* Receive Timing (1 = falling edge) */
+#define CTR_TXDIZ_MASK 0x00c00000 /* Pin Output When TX is Disabled */
+#define CTR_TXDIZ_LOW 0x00000000 /* 0 */
+#define CTR_TXDIZ_HIGH 0x00400000 /* 1 */
+#define CTR_TXDIZ_HIZ 0x00800000 /* High-impedance */
+#define CTR_TSCKE 0x00008000 /* Transmit Serial Clock Output Enable */
+#define CTR_TFSE 0x00004000 /* Transmit Frame Sync Signal Output Enable */
+#define CTR_TXE 0x00000200 /* Transmit Enable */
+#define CTR_RXE 0x00000100 /* Receive Enable */
+
+/* FCTR */
+#define FCTR_TFWM_MASK 0xe0000000 /* Transmit FIFO Watermark */
+#define FCTR_TFWM_64 0x00000000 /* Transfer Request when 64 empty stages */
+#define FCTR_TFWM_32 0x20000000 /* Transfer Request when 32 empty stages */
+#define FCTR_TFWM_24 0x40000000 /* Transfer Request when 24 empty stages */
+#define FCTR_TFWM_16 0x60000000 /* Transfer Request when 16 empty stages */
+#define FCTR_TFWM_12 0x80000000 /* Transfer Request when 12 empty stages */
+#define FCTR_TFWM_8 0xa0000000 /* Transfer Request when 8 empty stages */
+#define FCTR_TFWM_4 0xc0000000 /* Transfer Request when 4 empty stages */
+#define FCTR_TFWM_1 0xe0000000 /* Transfer Request when 1 empty stage */
+#define FCTR_TFUA_MASK 0x07f00000 /* Transmit FIFO Usable Area */
+#define FCTR_TFUA_SHIFT 20
+#define FCTR_TFUA(i) ((i) << FCTR_TFUA_SHIFT)
+#define FCTR_RFWM_MASK 0x0000e000 /* Receive FIFO Watermark */
+#define FCTR_RFWM_1 0x00000000 /* Transfer Request when 1 valid stage */
+#define FCTR_RFWM_4 0x00002000 /* Transfer Request when 4 valid stages */
+#define FCTR_RFWM_8 0x00004000 /* Transfer Request when 8 valid stages */
+#define FCTR_RFWM_16 0x00006000 /* Transfer Request when 16 valid stages */
+#define FCTR_RFWM_32 0x00008000 /* Transfer Request when 32 valid stages */
+#define FCTR_RFWM_64 0x0000a000 /* Transfer Request when 64 valid stages */
+#define FCTR_RFWM_128 0x0000c000 /* Transfer Request when 128 valid stages */
+#define FCTR_RFWM_256 0x0000e000 /* Transfer Request when 256 valid stages */
+#define FCTR_RFUA_MASK 0x00001ff0 /* Receive FIFO Usable Area (0x40 = full) */
+#define FCTR_RFUA_SHIFT 4
+#define FCTR_RFUA(i) ((i) << FCTR_RFUA_SHIFT)
+
+/* STR */
+#define STR_TFEMP 0x20000000 /* Transmit FIFO Empty */
+#define STR_TDREQ 0x10000000 /* Transmit Data Transfer Request */
+#define STR_TEOF 0x00800000 /* Frame Transmission End */
+#define STR_TFSERR 0x00200000 /* Transmit Frame Synchronization Error */
+#define STR_TFOVF 0x00100000 /* Transmit FIFO Overflow */
+#define STR_TFUDF 0x00080000 /* Transmit FIFO Underflow */
+#define STR_RFFUL 0x00002000 /* Receive FIFO Full */
+#define STR_RDREQ 0x00001000 /* Receive Data Transfer Request */
+#define STR_REOF 0x00000080 /* Frame Reception End */
+#define STR_RFSERR 0x00000020 /* Receive Frame Synchronization Error */
+#define STR_RFUDF 0x00000010 /* Receive FIFO Underflow */
+#define STR_RFOVF 0x00000008 /* Receive FIFO Overflow */
+
+/* IER */
+#define IER_TDMAE 0x80000000 /* Transmit Data DMA Transfer Req. Enable */
+#define IER_TFEMPE 0x20000000 /* Transmit FIFO Empty Enable */
+#define IER_TDREQE 0x10000000 /* Transmit Data Transfer Request Enable */
+#define IER_TEOFE 0x00800000 /* Frame Transmission End Enable */
+#define IER_TFSERRE 0x00200000 /* Transmit Frame Sync Error Enable */
+#define IER_TFOVFE 0x00100000 /* Transmit FIFO Overflow Enable */
+#define IER_TFUDFE 0x00080000 /* Transmit FIFO Underflow Enable */
+#define IER_RDMAE 0x00008000 /* Receive Data DMA Transfer Req. Enable */
+#define IER_RFFULE 0x00002000 /* Receive FIFO Full Enable */
+#define IER_RDREQE 0x00001000 /* Receive Data Transfer Request Enable */
+#define IER_REOFE 0x00000080 /* Frame Reception End Enable */
+#define IER_RFSERRE 0x00000020 /* Receive Frame Sync Error Enable */
+#define IER_RFUDFE 0x00000010 /* Receive FIFO Underflow Enable */
+#define IER_RFOVFE 0x00000008 /* Receive FIFO Overflow Enable */
+
+
+static u32 sh_msiof_read(struct sh_msiof_spi_priv *p, int reg_offs)
+{
+ switch (reg_offs) {
+ case TSCR:
+ case RSCR:
+ return ioread16(p->mapbase + reg_offs);
+ default:
+ return ioread32(p->mapbase + reg_offs);
+ }
+}
+
+static void sh_msiof_write(struct sh_msiof_spi_priv *p, int reg_offs,
+ u32 value)
+{
+ switch (reg_offs) {
+ case TSCR:
+ case RSCR:
+ iowrite16(value, p->mapbase + reg_offs);
+ break;
+ default:
+ iowrite32(value, p->mapbase + reg_offs);
+ break;
+ }
+}
+
+static int sh_msiof_modify_ctr_wait(struct sh_msiof_spi_priv *p,
+ u32 clr, u32 set)
+{
+ u32 mask = clr | set;
+ u32 data;
+ int k;
+
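+	/* Apply the new CTR bits, then busy-wait until the hardware reflects them */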
+ data = sh_msiof_read(p, CTR);
+ data &= ~clr;
+ data |= set;
+ sh_msiof_write(p, CTR, data);
+
+ for (k = 100; k > 0; k--) {
+ if ((sh_msiof_read(p, CTR) & mask) == set)
+ break;
+
+ udelay(10);
+ }
+
+ return k > 0 ? 0 : -ETIMEDOUT;
+}
+
+static irqreturn_t sh_msiof_spi_irq(int irq, void *data)
+{
+ struct sh_msiof_spi_priv *p = data;
+
+ /* just disable the interrupt and wake up */
+ sh_msiof_write(p, IER, 0);
+ complete(&p->done);
+
+ return IRQ_HANDLED;
+}
+
+static struct {
+ unsigned short div;
+ unsigned short brdv;
+} const sh_msiof_spi_div_table[] = {
+ { 1, SCR_BRDV_DIV_1 },
+ { 2, SCR_BRDV_DIV_2 },
+ { 4, SCR_BRDV_DIV_4 },
+ { 8, SCR_BRDV_DIV_8 },
+ { 16, SCR_BRDV_DIV_16 },
+ { 32, SCR_BRDV_DIV_32 },
+};
+
+static void sh_msiof_spi_set_clk_regs(struct sh_msiof_spi_priv *p,
+ unsigned long parent_rate, u32 spi_hz)
+{
+ unsigned long div = 1024;
+ u32 brps, scr;
+ size_t k;
+
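+	/* Effective rate = parent_rate / (BRDV divider * BRPS), BRPS in 1..32 */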
+ if (!WARN_ON(!spi_hz || !parent_rate))
+ div = DIV_ROUND_UP(parent_rate, spi_hz);
+
+ for (k = 0; k < ARRAY_SIZE(sh_msiof_spi_div_table); k++) {
+ brps = DIV_ROUND_UP(div, sh_msiof_spi_div_table[k].div);
+ if (brps <= 32) /* max of brdv is 32 */
+ break;
+ }
+
+ k = min_t(int, k, ARRAY_SIZE(sh_msiof_spi_div_table) - 1);
+
+ scr = sh_msiof_spi_div_table[k].brdv | SCR_BRPS(brps);
+ sh_msiof_write(p, TSCR, scr);
+ if (!(p->chipdata->master_flags & SPI_MASTER_MUST_TX))
+ sh_msiof_write(p, RSCR, scr);
+}
+
+static u32 sh_msiof_get_delay_bit(u32 dtdl_or_syncdl)
+{
+ /*
+ * DTDL/SYNCDL bit : p->info->dtdl or p->info->syncdl
+ * b'000 : 0
+ * b'001 : 100
+ * b'010 : 200
+ * b'011 (SYNCDL only) : 300
+ * b'101 : 50
+ * b'110 : 150
+ */
+ if (dtdl_or_syncdl % 100)
+ return dtdl_or_syncdl / 100 + 5;
+ else
+ return dtdl_or_syncdl / 100;
+}
+
+static u32 sh_msiof_spi_get_dtdl_and_syncdl(struct sh_msiof_spi_priv *p)
+{
+ u32 val;
+
+ if (!p->info)
+ return 0;
+
+	/* check if DTDL and SYNCDL are allowed values */
+ if (p->info->dtdl > 200 || p->info->syncdl > 300) {
+ dev_warn(&p->pdev->dev, "DTDL or SYNCDL is too large\n");
+ return 0;
+ }
+
+	/* check that the sum of DTDL and SYNCDL is a multiple of 100 */
+	if ((p->info->dtdl + p->info->syncdl) % 100) {
+		dev_warn(&p->pdev->dev, "the sum of DTDL/SYNCDL is not a multiple of 100\n");
+ return 0;
+ }
+
+ val = sh_msiof_get_delay_bit(p->info->dtdl) << MDR1_DTDL_SHIFT;
+ val |= sh_msiof_get_delay_bit(p->info->syncdl) << MDR1_SYNCDL_SHIFT;
+
+ return val;
+}
+
+static void sh_msiof_spi_set_pin_regs(struct sh_msiof_spi_priv *p,
+ u32 cpol, u32 cpha,
+ u32 tx_hi_z, u32 lsb_first, u32 cs_high)
+{
+ u32 tmp;
+ int edge;
+
+ /*
+ * CPOL CPHA TSCKIZ RSCKIZ TEDG REDG
+ * 0 0 10 10 1 1
+ * 0 1 10 10 0 0
+ * 1 0 11 11 0 0
+ * 1 1 11 11 1 1
+ */
+ tmp = MDR1_SYNCMD_SPI | 1 << MDR1_FLD_SHIFT | MDR1_XXSTP;
+ tmp |= !cs_high << MDR1_SYNCAC_SHIFT;
+ tmp |= lsb_first << MDR1_BITLSB_SHIFT;
+ tmp |= sh_msiof_spi_get_dtdl_and_syncdl(p);
+ sh_msiof_write(p, TMDR1, tmp | MDR1_TRMD | TMDR1_PCON);
+ if (p->chipdata->master_flags & SPI_MASTER_MUST_TX) {
+ /* These bits are reserved if RX needs TX */
+ tmp &= ~0x0000ffff;
+ }
+ sh_msiof_write(p, RMDR1, tmp);
+
+ tmp = 0;
+ tmp |= CTR_TSCKIZ_SCK | cpol << CTR_TSCKIZ_POL_SHIFT;
+ tmp |= CTR_RSCKIZ_SCK | cpol << CTR_RSCKIZ_POL_SHIFT;
+
+ edge = cpol ^ !cpha;
+
+ tmp |= edge << CTR_TEDG_SHIFT;
+ tmp |= edge << CTR_REDG_SHIFT;
+ tmp |= tx_hi_z ? CTR_TXDIZ_HIZ : CTR_TXDIZ_LOW;
+ sh_msiof_write(p, CTR, tmp);
+}
+
+static void sh_msiof_spi_set_mode_regs(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, void *rx_buf,
+ u32 bits, u32 words)
+{
+ u32 dr2 = MDR2_BITLEN1(bits) | MDR2_WDLEN1(words);
+
+ if (tx_buf || (p->chipdata->master_flags & SPI_MASTER_MUST_TX))
+ sh_msiof_write(p, TMDR2, dr2);
+ else
+ sh_msiof_write(p, TMDR2, dr2 | MDR2_GRPMASK1);
+
+ if (rx_buf)
+ sh_msiof_write(p, RMDR2, dr2);
+}
+
+static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
+{
+ sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+}
+
+static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u8 *buf_8 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, buf_8[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_16(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u16 *buf_16 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, buf_16[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_16u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u16 *buf_16 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, get_unaligned(&buf_16[k]) << fs);
+}
+
+static void sh_msiof_spi_write_fifo_32(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, buf_32[k] << fs);
+}
+
+static void sh_msiof_spi_write_fifo_32u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, get_unaligned(&buf_32[k]) << fs);
+}
+
+static void sh_msiof_spi_write_fifo_s32(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, swab32(buf_32[k] << fs));
+}
+
+static void sh_msiof_spi_write_fifo_s32u(struct sh_msiof_spi_priv *p,
+ const void *tx_buf, int words, int fs)
+{
+ const u32 *buf_32 = tx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ sh_msiof_write(p, TFDR, swab32(get_unaligned(&buf_32[k]) << fs));
+}
+
+static void sh_msiof_spi_read_fifo_8(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u8 *buf_8 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_8[k] = sh_msiof_read(p, RFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_16(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u16 *buf_16 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_16[k] = sh_msiof_read(p, RFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_16u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u16 *buf_16 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_16[k]);
+}
+
+static void sh_msiof_spi_read_fifo_32(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_32[k] = sh_msiof_read(p, RFDR) >> fs;
+}
+
+static void sh_msiof_spi_read_fifo_32u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(sh_msiof_read(p, RFDR) >> fs, &buf_32[k]);
+}
+
+static void sh_msiof_spi_read_fifo_s32(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ buf_32[k] = swab32(sh_msiof_read(p, RFDR) >> fs);
+}
+
+static void sh_msiof_spi_read_fifo_s32u(struct sh_msiof_spi_priv *p,
+ void *rx_buf, int words, int fs)
+{
+ u32 *buf_32 = rx_buf;
+ int k;
+
+ for (k = 0; k < words; k++)
+ put_unaligned(swab32(sh_msiof_read(p, RFDR) >> fs), &buf_32[k]);
+}
+
+static int sh_msiof_spi_setup(struct spi_device *spi)
+{
+ struct device_node *np = spi->master->dev.of_node;
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(spi->master);
+
+ pm_runtime_get_sync(&p->pdev->dev);
+
+ if (!np) {
+ /*
+ * Use spi->controller_data for CS (same strategy as spi_gpio),
+ * if any; otherwise let the hardware control CS.
+ */
+ spi->cs_gpio = (uintptr_t)spi->controller_data;
+ }
+
+ /* Configure pins before deasserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST),
+ !!(spi->mode & SPI_CS_HIGH));
+
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, !(spi->mode & SPI_CS_HIGH));
+
+ pm_runtime_put(&p->pdev->dev);
+
+ return 0;
+}
+
+static int sh_msiof_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ const struct spi_device *spi = msg->spi;
+
+ /* Configure pins before asserting CS */
+ sh_msiof_spi_set_pin_regs(p, !!(spi->mode & SPI_CPOL),
+ !!(spi->mode & SPI_CPHA),
+ !!(spi->mode & SPI_3WIRE),
+ !!(spi->mode & SPI_LSB_FIRST),
+ !!(spi->mode & SPI_CS_HIGH));
+ return 0;
+}
+
+static int sh_msiof_spi_start(struct sh_msiof_spi_priv *p, void *rx_buf)
+{
+ int ret;
+
+ /* setup clock and rx/tx signals */
+ ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TSCKE);
+ if (rx_buf && !ret)
+ ret = sh_msiof_modify_ctr_wait(p, 0, CTR_RXE);
+ if (!ret)
+ ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TXE);
+
+ /* start by setting frame bit */
+ if (!ret)
+ ret = sh_msiof_modify_ctr_wait(p, 0, CTR_TFSE);
+
+ return ret;
+}
+
+static int sh_msiof_spi_stop(struct sh_msiof_spi_priv *p, void *rx_buf)
+{
+ int ret;
+
+ /* shut down frame, rx/tx and clock signals */
+ ret = sh_msiof_modify_ctr_wait(p, CTR_TFSE, 0);
+ if (!ret)
+ ret = sh_msiof_modify_ctr_wait(p, CTR_TXE, 0);
+ if (rx_buf && !ret)
+ ret = sh_msiof_modify_ctr_wait(p, CTR_RXE, 0);
+ if (!ret)
+ ret = sh_msiof_modify_ctr_wait(p, CTR_TSCKE, 0);
+
+ return ret;
+}
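+/*
+ * Editor's note: sh_msiof_spi_stop() releases the signals in exactly
+ * the reverse order of sh_msiof_spi_start(): frame sync first, then
+ * TX, RX, and finally the transmit clock, so the clock keeps running
+ * until all data paths are quiesced.
+ */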
+
+static int sh_msiof_spi_txrx_once(struct sh_msiof_spi_priv *p,
+ void (*tx_fifo)(struct sh_msiof_spi_priv *,
+ const void *, int, int),
+ void (*rx_fifo)(struct sh_msiof_spi_priv *,
+ void *, int, int),
+ const void *tx_buf, void *rx_buf,
+ int words, int bits)
+{
+ int fifo_shift;
+ int ret;
+
+ /* limit maximum word transfer to rx/tx fifo size */
+ if (tx_buf)
+ words = min_t(int, words, p->tx_fifo_size);
+ if (rx_buf)
+ words = min_t(int, words, p->rx_fifo_size);
+
+ /* the fifo contents need shifting */
+ fifo_shift = 32 - bits;
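+ /*
+ * Illustrative example (editor's note): with bits = 8 the shift is
+ * 24, so the TX helpers place each byte in the top bits of TFDR
+ * (0xa5 becomes 0xa5000000) and the RX helpers shift RFDR down by
+ * the same amount.
+ */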
+
+ /* default FIFO watermarks for PIO */
+ sh_msiof_write(p, FCTR, 0);
+
+ /* setup msiof transfer mode registers */
+ sh_msiof_spi_set_mode_regs(p, tx_buf, rx_buf, bits, words);
+ sh_msiof_write(p, IER, IER_TEOFE | IER_REOFE);
+
+ /* write tx fifo */
+ if (tx_buf)
+ tx_fifo(p, tx_buf, words, fifo_shift);
+
+ reinit_completion(&p->done);
+
+ ret = sh_msiof_spi_start(p, rx_buf);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to start hardware\n");
+ goto stop_ier;
+ }
+
+ /* wait for tx fifo to be emptied / rx fifo to be filled */
+ if (!wait_for_completion_timeout(&p->done, HZ)) {
+ dev_err(&p->pdev->dev, "PIO timeout\n");
+ ret = -ETIMEDOUT;
+ goto stop_reset;
+ }
+
+ /* read rx fifo */
+ if (rx_buf)
+ rx_fifo(p, rx_buf, words, fifo_shift);
+
+ /* clear status bits */
+ sh_msiof_reset_str(p);
+
+ ret = sh_msiof_spi_stop(p, rx_buf);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to shut down hardware\n");
+ return ret;
+ }
+
+ return words;
+
+stop_reset:
+ sh_msiof_reset_str(p);
+ sh_msiof_spi_stop(p, rx_buf);
+stop_ier:
+ sh_msiof_write(p, IER, 0);
+ return ret;
+}
+
+static void sh_msiof_dma_complete(void *arg)
+{
+ struct sh_msiof_spi_priv *p = arg;
+
+ sh_msiof_write(p, IER, 0);
+ complete(&p->done);
+}
+
+static int sh_msiof_dma_once(struct sh_msiof_spi_priv *p, const void *tx,
+ void *rx, unsigned int len)
+{
+ u32 ier_bits = 0;
+ struct dma_async_tx_descriptor *desc_tx = NULL, *desc_rx = NULL;
+ dma_cookie_t cookie;
+ int ret;
+
+ /* First prepare and submit the DMA request(s), as this may fail */
+ if (rx) {
+ ier_bits |= IER_RDREQE | IER_RDMAE;
+ desc_rx = dmaengine_prep_slave_single(p->master->dma_rx,
+ p->rx_dma_addr, len, DMA_FROM_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx)
+ return -EAGAIN;
+
+ desc_rx->callback = sh_msiof_dma_complete;
+ desc_rx->callback_param = p;
+ cookie = dmaengine_submit(desc_rx);
+ if (dma_submit_error(cookie))
+ return cookie;
+ }
+
+ if (tx) {
+ ier_bits |= IER_TDREQE | IER_TDMAE;
+ dma_sync_single_for_device(p->master->dma_tx->device->dev,
+ p->tx_dma_addr, len, DMA_TO_DEVICE);
+ desc_tx = dmaengine_prep_slave_single(p->master->dma_tx,
+ p->tx_dma_addr, len, DMA_TO_DEVICE,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ ret = -EAGAIN;
+ goto no_dma_tx;
+ }
+
+ if (rx) {
+ /* No callback */
+ desc_tx->callback = NULL;
+ } else {
+ desc_tx->callback = sh_msiof_dma_complete;
+ desc_tx->callback_param = p;
+ }
+ cookie = dmaengine_submit(desc_tx);
+ if (dma_submit_error(cookie)) {
+ ret = cookie;
+ goto no_dma_tx;
+ }
+ }
+
+ /* 1 stage FIFO watermarks for DMA */
+ sh_msiof_write(p, FCTR, FCTR_TFWM_1 | FCTR_RFWM_1);
+
+ /* setup msiof transfer mode registers (32-bit words) */
+ sh_msiof_spi_set_mode_regs(p, tx, rx, 32, len / 4);
+
+ sh_msiof_write(p, IER, ier_bits);
+
+ reinit_completion(&p->done);
+
+ /* Now start DMA */
+ if (rx)
+ dma_async_issue_pending(p->master->dma_rx);
+ if (tx)
+ dma_async_issue_pending(p->master->dma_tx);
+
+ ret = sh_msiof_spi_start(p, rx);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to start hardware\n");
+ goto stop_dma;
+ }
+
+ /* wait for tx fifo to be emptied / rx fifo to be filled */
+ if (!wait_for_completion_timeout(&p->done, HZ)) {
+ dev_err(&p->pdev->dev, "DMA timeout\n");
+ ret = -ETIMEDOUT;
+ goto stop_reset;
+ }
+
+ /* clear status bits */
+ sh_msiof_reset_str(p);
+
+ ret = sh_msiof_spi_stop(p, rx);
+ if (ret) {
+ dev_err(&p->pdev->dev, "failed to shut down hardware\n");
+ return ret;
+ }
+
+ if (rx)
+ dma_sync_single_for_cpu(p->master->dma_rx->device->dev,
+ p->rx_dma_addr, len,
+ DMA_FROM_DEVICE);
+
+ return 0;
+
+stop_reset:
+ sh_msiof_reset_str(p);
+ sh_msiof_spi_stop(p, rx);
+stop_dma:
+ if (tx)
+ dmaengine_terminate_all(p->master->dma_tx);
+no_dma_tx:
+ if (rx)
+ dmaengine_terminate_all(p->master->dma_rx);
+ sh_msiof_write(p, IER, 0);
+ return ret;
+}
+
+static void copy_bswap32(u32 *dst, const u32 *src, unsigned int words)
+{
+ /* src or dst can be unaligned, but not both */
+ if ((unsigned long)src & 3) {
+ while (words--) {
+ *dst++ = swab32(get_unaligned(src));
+ src++;
+ }
+ } else if ((unsigned long)dst & 3) {
+ while (words--) {
+ put_unaligned(swab32(*src++), dst);
+ dst++;
+ }
+ } else {
+ while (words--)
+ *dst++ = swab32(*src++);
+ }
+}
+
+static void copy_wswap32(u32 *dst, const u32 *src, unsigned int words)
+{
+ /* src or dst can be unaligned, but not both */
+ if ((unsigned long)src & 3) {
+ while (words--) {
+ *dst++ = swahw32(get_unaligned(src));
+ src++;
+ }
+ } else if ((unsigned long)dst & 3) {
+ while (words--) {
+ put_unaligned(swahw32(*src++), dst);
+ dst++;
+ }
+ } else {
+ while (words--)
+ *dst++ = swahw32(*src++);
+ }
+}
+
+static void copy_plain32(u32 *dst, const u32 *src, unsigned int words)
+{
+ memcpy(dst, src, words * 4);
+}
+
+static int sh_msiof_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct sh_msiof_spi_priv *p = spi_master_get_devdata(master);
+ void (*copy32)(u32 *, const u32 *, unsigned int);
+ void (*tx_fifo)(struct sh_msiof_spi_priv *, const void *, int, int);
+ void (*rx_fifo)(struct sh_msiof_spi_priv *, void *, int, int);
+ const void *tx_buf = t->tx_buf;
+ void *rx_buf = t->rx_buf;
+ unsigned int len = t->len;
+ unsigned int bits = t->bits_per_word;
+ unsigned int bytes_per_word;
+ unsigned int words;
+ int n;
+ bool swab;
+ int ret;
+
+ /* setup clocks (clock already enabled in chipselect()) */
+ sh_msiof_spi_set_clk_regs(p, clk_get_rate(p->clk), t->speed_hz);
+
+ while (master->dma_tx && len > 15) {
+ /*
+ * DMA supports 32-bit words only, hence pack 8-bit words with
+ * byte swapping and 16-bit words with word swapping.
+ */
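+ /*
+ * Worked example (editor's note): on a little-endian CPU, tx bytes
+ * {0x11, 0x22, 0x33, 0x44} read back as the u32 0x44332211;
+ * copy_bswap32() stores swab32() of that, i.e. 0x11223344, so the
+ * MSB-first 32-bit FIFO word shifts out 0x11 first and the on-wire
+ * byte order is preserved.
+ */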
+ unsigned int l = min(len, MAX_WDLEN * 4);
+
+ if (bits <= 8) {
+ if (l & 3)
+ break;
+ copy32 = copy_bswap32;
+ } else if (bits <= 16) {
+ if (l & 1)
+ break;
+ copy32 = copy_wswap32;
+ } else {
+ copy32 = copy_plain32;
+ }
+
+ if (tx_buf)
+ copy32(p->tx_dma_page, tx_buf, l / 4);
+
+ ret = sh_msiof_dma_once(p, tx_buf, rx_buf, l);
+ if (ret == -EAGAIN) {
+ pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
+ dev_driver_string(&p->pdev->dev),
+ dev_name(&p->pdev->dev));
+ break;
+ }
+ if (ret)
+ return ret;
+
+ if (rx_buf) {
+ copy32(rx_buf, p->rx_dma_page, l / 4);
+ rx_buf += l;
+ }
+ if (tx_buf)
+ tx_buf += l;
+
+ len -= l;
+ if (!len)
+ return 0;
+ }
+
+ if (bits <= 8 && len > 15 && !(len & 3)) {
+ bits = 32;
+ swab = true;
+ } else {
+ swab = false;
+ }
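+ /*
+ * Editor's note: when at least 16 bytes remain and the length is a
+ * multiple of 4, four 8-bit words are regrouped into one 32-bit
+ * FIFO word, quartering the number of FIFO accesses; the swab flag
+ * then selects the byte-swapping s32 FIFO helpers so the on-wire
+ * byte order stays unchanged.
+ */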
+
+ /* setup bytes per word and fifo read/write functions */
+ if (bits <= 8) {
+ bytes_per_word = 1;
+ tx_fifo = sh_msiof_spi_write_fifo_8;
+ rx_fifo = sh_msiof_spi_read_fifo_8;
+ } else if (bits <= 16) {
+ bytes_per_word = 2;
+ if ((unsigned long)tx_buf & 0x01)
+ tx_fifo = sh_msiof_spi_write_fifo_16u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_16;
+
+ if ((unsigned long)rx_buf & 0x01)
+ rx_fifo = sh_msiof_spi_read_fifo_16u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_16;
+ } else if (swab) {
+ bytes_per_word = 4;
+ if ((unsigned long)tx_buf & 0x03)
+ tx_fifo = sh_msiof_spi_write_fifo_s32u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_s32;
+
+ if ((unsigned long)rx_buf & 0x03)
+ rx_fifo = sh_msiof_spi_read_fifo_s32u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_s32;
+ } else {
+ bytes_per_word = 4;
+ if ((unsigned long)tx_buf & 0x03)
+ tx_fifo = sh_msiof_spi_write_fifo_32u;
+ else
+ tx_fifo = sh_msiof_spi_write_fifo_32;
+
+ if ((unsigned long)rx_buf & 0x03)
+ rx_fifo = sh_msiof_spi_read_fifo_32u;
+ else
+ rx_fifo = sh_msiof_spi_read_fifo_32;
+ }
+
+ /* transfer in fifo sized chunks */
+ words = len / bytes_per_word;
+
+ while (words > 0) {
+ n = sh_msiof_spi_txrx_once(p, tx_fifo, rx_fifo, tx_buf, rx_buf,
+ words, bits);
+ if (n < 0)
+ return n;
+
+ if (tx_buf)
+ tx_buf += n * bytes_per_word;
+ if (rx_buf)
+ rx_buf += n * bytes_per_word;
+ words -= n;
+ }
+
+ return 0;
+}
+
+static const struct sh_msiof_chipdata sh_data = {
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 64,
+ .master_flags = 0,
+};
+
+static const struct sh_msiof_chipdata r8a779x_data = {
+ .tx_fifo_size = 64,
+ .rx_fifo_size = 256,
+ .master_flags = SPI_MASTER_MUST_TX,
+};
+
+static const struct of_device_id sh_msiof_match[] = {
+ { .compatible = "renesas,sh-msiof", .data = &sh_data },
+ { .compatible = "renesas,sh-mobile-msiof", .data = &sh_data },
+ { .compatible = "renesas,msiof-r8a7790", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7791", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7792", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7793", .data = &r8a779x_data },
+ { .compatible = "renesas,msiof-r8a7794", .data = &r8a779x_data },
+ {},
+};
+MODULE_DEVICE_TABLE(of, sh_msiof_match);
+
+#ifdef CONFIG_OF
+static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
+{
+ struct sh_msiof_spi_info *info;
+ struct device_node *np = dev->of_node;
+ u32 num_cs = 1;
+
+ info = devm_kzalloc(dev, sizeof(struct sh_msiof_spi_info), GFP_KERNEL);
+ if (!info)
+ return NULL;
+
+ /* Parse the MSIOF properties */
+ of_property_read_u32(np, "num-cs", &num_cs);
+ of_property_read_u32(np, "renesas,tx-fifo-size",
+ &info->tx_fifo_override);
+ of_property_read_u32(np, "renesas,rx-fifo-size",
+ &info->rx_fifo_override);
+ of_property_read_u32(np, "renesas,dtdl", &info->dtdl);
+ of_property_read_u32(np, "renesas,syncdl", &info->syncdl);
+
+ info->num_chipselect = num_cs;
+
+ return info;
+}
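+/*
+ * Editor's sketch of a device tree node this parser would accept; the
+ * node name, unit address and property values are illustrative only:
+ *
+ *	spi@e6e20000 {
+ *		compatible = "renesas,msiof-r8a7790";
+ *		num-cs = <1>;
+ *		renesas,tx-fifo-size = <64>;
+ *		renesas,rx-fifo-size = <256>;
+ *	};
+ */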
+#else
+static struct sh_msiof_spi_info *sh_msiof_spi_parse_dt(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
+static struct dma_chan *sh_msiof_request_dma_chan(struct device *dev,
+ enum dma_transfer_direction dir, unsigned int id, dma_addr_t port_addr)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct dma_slave_config cfg;
+ int ret;
+
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ chan = dma_request_slave_channel_compat(mask, shdma_chan_filter,
+ (void *)(unsigned long)id, dev,
+ dir == DMA_MEM_TO_DEV ? "tx" : "rx");
+ if (!chan) {
+ dev_warn(dev, "dma_request_slave_channel_compat failed\n");
+ return NULL;
+ }
+
+ memset(&cfg, 0, sizeof(cfg));
+ cfg.direction = dir;
+ if (dir == DMA_MEM_TO_DEV) {
+ cfg.dst_addr = port_addr;
+ cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ } else {
+ cfg.src_addr = port_addr;
+ cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ }
+
+ ret = dmaengine_slave_config(chan, &cfg);
+ if (ret) {
+ dev_warn(dev, "dmaengine_slave_config failed %d\n", ret);
+ dma_release_channel(chan);
+ return NULL;
+ }
+
+ return chan;
+}
+
+static int sh_msiof_request_dma(struct sh_msiof_spi_priv *p)
+{
+ struct platform_device *pdev = p->pdev;
+ struct device *dev = &pdev->dev;
+ const struct sh_msiof_spi_info *info = dev_get_platdata(dev);
+ unsigned int dma_tx_id, dma_rx_id;
+ const struct resource *res;
+ struct spi_master *master;
+ struct device *tx_dev, *rx_dev;
+
+ if (dev->of_node) {
+ /* In the OF case we will get the slave IDs from the DT */
+ dma_tx_id = 0;
+ dma_rx_id = 0;
+ } else if (info && info->dma_tx_id && info->dma_rx_id) {
+ dma_tx_id = info->dma_tx_id;
+ dma_rx_id = info->dma_rx_id;
+ } else {
+ /* No DMA configuration was supplied; fall back to PIO, not an error */
+ return 0;
+ }
+
+ /* The DMA engine uses the second register set, if present */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (!res)
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+ master = p->master;
+ master->dma_tx = sh_msiof_request_dma_chan(dev, DMA_MEM_TO_DEV,
+ dma_tx_id,
+ res->start + TFDR);
+ if (!master->dma_tx)
+ return -ENODEV;
+
+ master->dma_rx = sh_msiof_request_dma_chan(dev, DMA_DEV_TO_MEM,
+ dma_rx_id,
+ res->start + RFDR);
+ if (!master->dma_rx)
+ goto free_tx_chan;
+
+ p->tx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!p->tx_dma_page)
+ goto free_rx_chan;
+
+ p->rx_dma_page = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ if (!p->rx_dma_page)
+ goto free_tx_page;
+
+ tx_dev = master->dma_tx->device->dev;
+ p->tx_dma_addr = dma_map_single(tx_dev, p->tx_dma_page, PAGE_SIZE,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(tx_dev, p->tx_dma_addr))
+ goto free_rx_page;
+
+ rx_dev = master->dma_rx->device->dev;
+ p->rx_dma_addr = dma_map_single(rx_dev, p->rx_dma_page, PAGE_SIZE,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(rx_dev, p->rx_dma_addr))
+ goto unmap_tx_page;
+
+ dev_info(dev, "DMA available\n");
+ return 0;
+
+unmap_tx_page:
+ dma_unmap_single(tx_dev, p->tx_dma_addr, PAGE_SIZE, DMA_TO_DEVICE);
+free_rx_page:
+ free_page((unsigned long)p->rx_dma_page);
+free_tx_page:
+ free_page((unsigned long)p->tx_dma_page);
+free_rx_chan:
+ dma_release_channel(master->dma_rx);
+free_tx_chan:
+ dma_release_channel(master->dma_tx);
+ master->dma_tx = NULL;
+ return -ENODEV;
+}
+
+static void sh_msiof_release_dma(struct sh_msiof_spi_priv *p)
+{
+ struct spi_master *master = p->master;
+ struct device *dev;
+
+ if (!master->dma_tx)
+ return;
+
+ dev = &p->pdev->dev;
+ dma_unmap_single(master->dma_rx->device->dev, p->rx_dma_addr,
+ PAGE_SIZE, DMA_FROM_DEVICE);
+ dma_unmap_single(master->dma_tx->device->dev, p->tx_dma_addr,
+ PAGE_SIZE, DMA_TO_DEVICE);
+ free_page((unsigned long)p->rx_dma_page);
+ free_page((unsigned long)p->tx_dma_page);
+ dma_release_channel(master->dma_rx);
+ dma_release_channel(master->dma_tx);
+}
+
+static int sh_msiof_spi_probe(struct platform_device *pdev)
+{
+ struct resource *r;
+ struct spi_master *master;
+ const struct of_device_id *of_id;
+ struct sh_msiof_spi_priv *p;
+ int i;
+ int ret;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sh_msiof_spi_priv));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "failed to allocate spi master\n");
+ return -ENOMEM;
+ }
+
+ p = spi_master_get_devdata(master);
+
+ platform_set_drvdata(pdev, p);
+ p->master = master;
+
+ of_id = of_match_device(sh_msiof_match, &pdev->dev);
+ if (of_id) {
+ p->chipdata = of_id->data;
+ p->info = sh_msiof_spi_parse_dt(&pdev->dev);
+ } else {
+ p->chipdata = (const void *)pdev->id_entry->driver_data;
+ p->info = dev_get_platdata(&pdev->dev);
+ }
+
+ if (!p->info) {
+ dev_err(&pdev->dev, "failed to obtain device info\n");
+ ret = -ENXIO;
+ goto err1;
+ }
+
+ init_completion(&p->done);
+
+ p->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(p->clk)) {
+ dev_err(&pdev->dev, "cannot get clock\n");
+ ret = PTR_ERR(p->clk);
+ goto err1;
+ }
+
+ i = platform_get_irq(pdev, 0);
+ if (i < 0) {
+ dev_err(&pdev->dev, "cannot get platform IRQ\n");
+ ret = -ENOENT;
+ goto err1;
+ }
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ p->mapbase = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(p->mapbase)) {
+ ret = PTR_ERR(p->mapbase);
+ goto err1;
+ }
+
+ ret = devm_request_irq(&pdev->dev, i, sh_msiof_spi_irq, 0,
+ dev_name(&pdev->dev), p);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to request irq\n");
+ goto err1;
+ }
+
+ p->pdev = pdev;
+ pm_runtime_enable(&pdev->dev);
+
+ /* Platform data may override FIFO sizes */
+ p->tx_fifo_size = p->chipdata->tx_fifo_size;
+ p->rx_fifo_size = p->chipdata->rx_fifo_size;
+ if (p->info->tx_fifo_override)
+ p->tx_fifo_size = p->info->tx_fifo_override;
+ if (p->info->rx_fifo_override)
+ p->rx_fifo_size = p->info->rx_fifo_override;
+
+ /* init master code */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->mode_bits |= SPI_LSB_FIRST | SPI_3WIRE;
+ master->flags = p->chipdata->master_flags;
+ master->bus_num = pdev->id;
+ master->dev.of_node = pdev->dev.of_node;
+ master->num_chipselect = p->info->num_chipselect;
+ master->setup = sh_msiof_spi_setup;
+ master->prepare_message = sh_msiof_prepare_message;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(8, 32);
+ master->auto_runtime_pm = true;
+ master->transfer_one = sh_msiof_transfer_one;
+
+ ret = sh_msiof_request_dma(p);
+ if (ret < 0)
+ dev_warn(&pdev->dev, "DMA not available, using PIO\n");
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_register_master error.\n");
+ goto err2;
+ }
+
+ return 0;
+
+ err2:
+ sh_msiof_release_dma(p);
+ pm_runtime_disable(&pdev->dev);
+ err1:
+ spi_master_put(master);
+ return ret;
+}
+
+static int sh_msiof_spi_remove(struct platform_device *pdev)
+{
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ sh_msiof_release_dma(p);
+ pm_runtime_disable(&pdev->dev);
+ return 0;
+}
+
+static struct platform_device_id spi_driver_ids[] = {
+ { "spi_sh_msiof", (kernel_ulong_t)&sh_data },
+ { "spi_r8a7790_msiof", (kernel_ulong_t)&r8a779x_data },
+ { "spi_r8a7791_msiof", (kernel_ulong_t)&r8a779x_data },
+ { "spi_r8a7792_msiof", (kernel_ulong_t)&r8a779x_data },
+ { "spi_r8a7793_msiof", (kernel_ulong_t)&r8a779x_data },
+ { "spi_r8a7794_msiof", (kernel_ulong_t)&r8a779x_data },
+ {},
+};
+MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+
+static struct platform_driver sh_msiof_spi_drv = {
+ .probe = sh_msiof_spi_probe,
+ .remove = sh_msiof_spi_remove,
+ .id_table = spi_driver_ids,
+ .driver = {
+ .name = "spi_sh_msiof",
+ .of_match_table = of_match_ptr(sh_msiof_match),
+ },
+};
+module_platform_driver(sh_msiof_spi_drv);
+
+MODULE_DESCRIPTION("SuperH MSIOF SPI Master Interface Driver");
+MODULE_AUTHOR("Magnus Damm");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:spi_sh_msiof");
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
new file mode 100644
index 000000000..a9beeeed8
--- /dev/null
+++ b/drivers/spi/spi-sh-sci.c
@@ -0,0 +1,197 @@
+/*
+ * SH SCI SPI interface
+ *
+ * Copyright (c) 2008 Magnus Damm
+ *
+ * Based on S3C24XX GPIO based SPI driver, which is:
+ * Copyright (c) 2006 Ben Dooks
+ * Copyright (c) 2006 Simtec Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/platform_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/module.h>
+
+#include <asm/spi.h>
+#include <asm/io.h>
+
+struct sh_sci_spi {
+ struct spi_bitbang bitbang;
+
+ void __iomem *membase;
+ unsigned char val;
+ struct sh_spi_info *info;
+ struct platform_device *dev;
+};
+
+#define SCSPTR(sp) (sp->membase + 0x1c)
+#define PIN_SCK (1 << 2)
+#define PIN_TXD (1 << 0)
+#define PIN_RXD PIN_TXD
+#define PIN_INIT ((1 << 1) | (1 << 3) | PIN_SCK | PIN_TXD)
+
+static inline void setbits(struct sh_sci_spi *sp, int bits, int on)
+{
+ /*
+ * We are the only user of SCSPTR so no locking is required.
+ * Reading bits 2 and 0 in SCSPTR gives the pin state as input.
+ * Writing the same bits sets the output value.
+ * This makes regular read-modify-write difficult so we
+ * use sp->val to keep track of the latest register value.
+ */
+
+ if (on)
+ sp->val |= bits;
+ else
+ sp->val &= ~bits;
+
+ iowrite8(sp->val, SCSPTR(sp));
+}
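+/*
+ * Editor's note: e.g. setbits(sp, PIN_SCK, 1) drives SCK high while
+ * leaving TXD and the remaining SCSPTR bits at their last written
+ * values, thanks to the sp->val shadow copy.
+ */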
+
+static inline void setsck(struct spi_device *dev, int on)
+{
+ setbits(spi_master_get_devdata(dev->master), PIN_SCK, on);
+}
+
+static inline void setmosi(struct spi_device *dev, int on)
+{
+ setbits(spi_master_get_devdata(dev->master), PIN_TXD, on);
+}
+
+static inline u32 getmiso(struct spi_device *dev)
+{
+ struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
+
+ return (ioread8(SCSPTR(sp)) & PIN_RXD) ? 1 : 0;
+}
+
+#define spidelay(x) ndelay(x)
+
+#include "spi-bitbang-txrx.h"
+
+static u32 sh_sci_spi_txrx_mode0(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 0, 0, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode1(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha1(spi, nsecs, 0, 0, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode2(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha0(spi, nsecs, 1, 0, word, bits);
+}
+
+static u32 sh_sci_spi_txrx_mode3(struct spi_device *spi,
+ unsigned nsecs, u32 word, u8 bits)
+{
+ return bitbang_txrx_be_cpha1(spi, nsecs, 1, 0, word, bits);
+}
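+/*
+ * Editor's note: the four wrappers above simply bind the helpers from
+ * spi-bitbang-txrx.h to the SPI modes: CPHA selects the
+ * bitbang_txrx_be_cpha0/1 variant and the third argument carries CPOL
+ * (0 for modes 0/1, 1 for modes 2/3).
+ */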
+
+static void sh_sci_spi_chipselect(struct spi_device *dev, int value)
+{
+ struct sh_sci_spi *sp = spi_master_get_devdata(dev->master);
+
+ if (sp->info->chip_select)
+ (sp->info->chip_select)(sp->info, dev->chip_select, value);
+}
+
+static int sh_sci_spi_probe(struct platform_device *dev)
+{
+ struct resource *r;
+ struct spi_master *master;
+ struct sh_sci_spi *sp;
+ int ret;
+
+ master = spi_alloc_master(&dev->dev, sizeof(struct sh_sci_spi));
+ if (master == NULL) {
+ dev_err(&dev->dev, "failed to allocate spi master\n");
+ ret = -ENOMEM;
+ goto err0;
+ }
+
+ sp = spi_master_get_devdata(master);
+
+ platform_set_drvdata(dev, sp);
+ sp->info = dev_get_platdata(&dev->dev);
+ if (!sp->info) {
+ dev_err(&dev->dev, "platform data is missing\n");
+ ret = -ENOENT;
+ goto err1;
+ }
+
+ /* setup spi bitbang adaptor */
+ sp->bitbang.master = master;
+ sp->bitbang.master->bus_num = sp->info->bus_num;
+ sp->bitbang.master->num_chipselect = sp->info->num_chipselect;
+ sp->bitbang.chipselect = sh_sci_spi_chipselect;
+
+ sp->bitbang.txrx_word[SPI_MODE_0] = sh_sci_spi_txrx_mode0;
+ sp->bitbang.txrx_word[SPI_MODE_1] = sh_sci_spi_txrx_mode1;
+ sp->bitbang.txrx_word[SPI_MODE_2] = sh_sci_spi_txrx_mode2;
+ sp->bitbang.txrx_word[SPI_MODE_3] = sh_sci_spi_txrx_mode3;
+
+ r = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ ret = -ENOENT;
+ goto err1;
+ }
+ sp->membase = ioremap(r->start, resource_size(r));
+ if (!sp->membase) {
+ ret = -ENXIO;
+ goto err1;
+ }
+ sp->val = ioread8(SCSPTR(sp));
+ setbits(sp, PIN_INIT, 1);
+
+ ret = spi_bitbang_start(&sp->bitbang);
+ if (!ret)
+ return 0;
+
+ setbits(sp, PIN_INIT, 0);
+ iounmap(sp->membase);
+ err1:
+ spi_master_put(sp->bitbang.master);
+ err0:
+ return ret;
+}
+
+static int sh_sci_spi_remove(struct platform_device *dev)
+{
+ struct sh_sci_spi *sp = platform_get_drvdata(dev);
+
+ spi_bitbang_stop(&sp->bitbang);
+ setbits(sp, PIN_INIT, 0);
+ iounmap(sp->membase);
+ spi_master_put(sp->bitbang.master);
+ return 0;
+}
+
+static struct platform_driver sh_sci_spi_drv = {
+ .probe = sh_sci_spi_probe,
+ .remove = sh_sci_spi_remove,
+ .driver = {
+ .name = "spi_sh_sci",
+ },
+};
+module_platform_driver(sh_sci_spi_drv);
+
+MODULE_DESCRIPTION("SH SCI SPI Driver");
+MODULE_AUTHOR("Magnus Damm <damm@opensource.se>");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:spi_sh_sci");
diff --git a/drivers/spi/spi-sh.c b/drivers/spi/spi-sh.c
new file mode 100644
index 000000000..502501187
--- /dev/null
+++ b/drivers/spi/spi-sh.c
@@ -0,0 +1,537 @@
+/*
+ * SH SPI bus driver
+ *
+ * Copyright (C) 2011 Renesas Solutions Corp.
+ *
+ * Based on pxa2xx_spi.c:
+ * Copyright (C) 2005 Stephen Street / StreetFire Sound Labs
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/errno.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/list.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/spi/spi.h>
+
+#define SPI_SH_TBR 0x00
+#define SPI_SH_RBR 0x00
+#define SPI_SH_CR1 0x08
+#define SPI_SH_CR2 0x10
+#define SPI_SH_CR3 0x18
+#define SPI_SH_CR4 0x20
+#define SPI_SH_CR5 0x28
+
+/* CR1 */
+#define SPI_SH_TBE 0x80
+#define SPI_SH_TBF 0x40
+#define SPI_SH_RBE 0x20
+#define SPI_SH_RBF 0x10
+#define SPI_SH_PFONRD 0x08
+#define SPI_SH_SSDB 0x04
+#define SPI_SH_SSD 0x02
+#define SPI_SH_SSA 0x01
+
+/* CR2 */
+#define SPI_SH_RSTF 0x80
+#define SPI_SH_LOOPBK 0x40
+#define SPI_SH_CPOL 0x20
+#define SPI_SH_CPHA 0x10
+#define SPI_SH_L1M0 0x08
+
+/* CR3 */
+#define SPI_SH_MAX_BYTE 0xFF
+
+/* CR4 */
+#define SPI_SH_TBEI 0x80
+#define SPI_SH_TBFI 0x40
+#define SPI_SH_RBEI 0x20
+#define SPI_SH_RBFI 0x10
+#define SPI_SH_WPABRT 0x04
+#define SPI_SH_SSS 0x01
+
+/* CR8 */
+#define SPI_SH_P1L0 0x80
+#define SPI_SH_PP1L0 0x40
+#define SPI_SH_MUXI 0x20
+#define SPI_SH_MUXIRQ 0x10
+
+#define SPI_SH_FIFO_SIZE 32
+#define SPI_SH_SEND_TIMEOUT (3 * HZ)
+#define SPI_SH_RECEIVE_TIMEOUT (HZ >> 3)
+
+#undef DEBUG
+
+struct spi_sh_data {
+ void __iomem *addr;
+ int irq;
+ struct spi_master *master;
+ struct list_head queue;
+ struct workqueue_struct *workqueue;
+ struct work_struct ws;
+ unsigned long cr1;
+ wait_queue_head_t wait;
+ spinlock_t lock;
+ int width;
+};
+
+static void spi_sh_write(struct spi_sh_data *ss, unsigned long data,
+ unsigned long offset)
+{
+ if (ss->width == 8)
+ iowrite8(data, ss->addr + (offset >> 2));
+ else if (ss->width == 32)
+ iowrite32(data, ss->addr + offset);
+}
+
+static unsigned long spi_sh_read(struct spi_sh_data *ss, unsigned long offset)
+{
+ if (ss->width == 8)
+ return ioread8(ss->addr + (offset >> 2));
+ else if (ss->width == 32)
+ return ioread32(ss->addr + offset);
+ else
+ return 0;
+}
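+/*
+ * Editor's note: with an 8-bit bus the register block is packed, so
+ * the nominal offsets are divided by four; e.g. SPI_SH_CR2 (0x10) is
+ * accessed at ss->addr + 0x04, while the 32-bit layout uses the
+ * offset unchanged.
+ */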
+
+static void spi_sh_set_bit(struct spi_sh_data *ss, unsigned long val,
+ unsigned long offset)
+{
+ unsigned long tmp;
+
+ tmp = spi_sh_read(ss, offset);
+ tmp |= val;
+ spi_sh_write(ss, tmp, offset);
+}
+
+static void spi_sh_clear_bit(struct spi_sh_data *ss, unsigned long val,
+ unsigned long offset)
+{
+ unsigned long tmp;
+
+ tmp = spi_sh_read(ss, offset);
+ tmp &= ~val;
+ spi_sh_write(ss, tmp, offset);
+}
+
+static void clear_fifo(struct spi_sh_data *ss)
+{
+ spi_sh_set_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
+ spi_sh_clear_bit(ss, SPI_SH_RSTF, SPI_SH_CR2);
+}
+
+static int spi_sh_wait_receive_buffer(struct spi_sh_data *ss)
+{
+ int timeout = 100000;
+
+ while (spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
+ udelay(10);
+ if (timeout-- < 0)
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int spi_sh_wait_write_buffer_empty(struct spi_sh_data *ss)
+{
+ int timeout = 100000;
+
+ while (!(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBE)) {
+ udelay(10);
+ if (timeout-- < 0)
+ return -ETIMEDOUT;
+ }
+ return 0;
+}
+
+static int spi_sh_send(struct spi_sh_data *ss, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int i, retval = 0;
+ int remain = t->len;
+ int cur_len;
+ unsigned char *data;
+ long ret;
+
+ if (t->len)
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ data = (unsigned char *)t->tx_buf;
+ while (remain > 0) {
+ cur_len = min(SPI_SH_FIFO_SIZE, remain);
+ for (i = 0; i < cur_len &&
+ !(spi_sh_read(ss, SPI_SH_CR4) &
+ SPI_SH_WPABRT) &&
+ !(spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_TBF);
+ i++)
+ spi_sh_write(ss, (unsigned long)data[i], SPI_SH_TBR);
+
+ if (spi_sh_read(ss, SPI_SH_CR4) & SPI_SH_WPABRT) {
+ /* Abort SPI operation */
+ spi_sh_set_bit(ss, SPI_SH_WPABRT, SPI_SH_CR4);
+ retval = -EIO;
+ break;
+ }
+
+ cur_len = i;
+
+ remain -= cur_len;
+ data += cur_len;
+
+ if (remain > 0) {
+ ss->cr1 &= ~SPI_SH_TBE;
+ spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_TBE,
+ SPI_SH_SEND_TIMEOUT);
+ if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+ }
+
+ if (list_is_last(&t->transfer_list, &mesg->transfers)) {
+ spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ ss->cr1 &= ~SPI_SH_TBE;
+ spi_sh_set_bit(ss, SPI_SH_TBE, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_TBE,
+ SPI_SH_SEND_TIMEOUT);
+ if (ret == 0 && !(ss->cr1 & SPI_SH_TBE)) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ return retval;
+}
+
+static int spi_sh_receive(struct spi_sh_data *ss, struct spi_message *mesg,
+ struct spi_transfer *t)
+{
+ int i;
+ int remain = t->len;
+ int cur_len;
+ unsigned char *data;
+ long ret;
+
+ if (t->len > SPI_SH_MAX_BYTE)
+ spi_sh_write(ss, SPI_SH_MAX_BYTE, SPI_SH_CR3);
+ else
+ spi_sh_write(ss, t->len, SPI_SH_CR3);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSD | SPI_SH_SSDB, SPI_SH_CR1);
+ spi_sh_set_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ spi_sh_wait_write_buffer_empty(ss);
+
+ data = (unsigned char *)t->rx_buf;
+ while (remain > 0) {
+ if (remain >= SPI_SH_FIFO_SIZE) {
+ ss->cr1 &= ~SPI_SH_RBF;
+ spi_sh_set_bit(ss, SPI_SH_RBF, SPI_SH_CR4);
+ ret = wait_event_interruptible_timeout(ss->wait,
+ ss->cr1 & SPI_SH_RBF,
+ SPI_SH_RECEIVE_TIMEOUT);
+ if (ret == 0 &&
+ spi_sh_read(ss, SPI_SH_CR1) & SPI_SH_RBE) {
+ printk(KERN_ERR "%s: timeout\n", __func__);
+ return -ETIMEDOUT;
+ }
+ }
+
+ cur_len = min(SPI_SH_FIFO_SIZE, remain);
+ for (i = 0; i < cur_len; i++) {
+ if (spi_sh_wait_receive_buffer(ss))
+ break;
+ data[i] = (unsigned char)spi_sh_read(ss, SPI_SH_RBR);
+ }
+
+ remain -= cur_len;
+ data += cur_len;
+ }
+
+ /* deassert CS when SPI is receiving. */
+ if (t->len > SPI_SH_MAX_BYTE) {
+ clear_fifo(ss);
+ spi_sh_write(ss, 1, SPI_SH_CR3);
+ } else {
+ spi_sh_write(ss, 0, SPI_SH_CR3);
+ }
+
+ return 0;
+}
+
+static void spi_sh_work(struct work_struct *work)
+{
+ struct spi_sh_data *ss = container_of(work, struct spi_sh_data, ws);
+ struct spi_message *mesg;
+ struct spi_transfer *t;
+ unsigned long flags;
+ int ret;
+
+ pr_debug("%s: enter\n", __func__);
+
+ spin_lock_irqsave(&ss->lock, flags);
+ while (!list_empty(&ss->queue)) {
+ mesg = list_entry(ss->queue.next, struct spi_message, queue);
+ list_del_init(&mesg->queue);
+
+ spin_unlock_irqrestore(&ss->lock, flags);
+ list_for_each_entry(t, &mesg->transfers, transfer_list) {
+ pr_debug("tx_buf = %p, rx_buf = %p\n",
+ t->tx_buf, t->rx_buf);
+ pr_debug("len = %d, delay_usecs = %d\n",
+ t->len, t->delay_usecs);
+
+ if (t->tx_buf) {
+ ret = spi_sh_send(ss, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ if (t->rx_buf) {
+ ret = spi_sh_receive(ss, mesg, t);
+ if (ret < 0)
+ goto error;
+ }
+ mesg->actual_length += t->len;
+ }
+ spin_lock_irqsave(&ss->lock, flags);
+
+ mesg->status = 0;
+ if (mesg->complete)
+ mesg->complete(mesg->context);
+ }
+
+ clear_fifo(ss);
+ spi_sh_set_bit(ss, SPI_SH_SSD, SPI_SH_CR1);
+ udelay(100);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+
+ clear_fifo(ss);
+
+ spin_unlock_irqrestore(&ss->lock, flags);
+
+ return;
+
+ error:
+ mesg->status = ret;
+ if (mesg->complete)
+ mesg->complete(mesg->context);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+ clear_fifo(ss);
+}
+
+static int spi_sh_setup(struct spi_device *spi)
+{
+ struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+
+ pr_debug("%s: enter\n", __func__);
+
+ spi_sh_write(ss, 0xfe, SPI_SH_CR1); /* SPI cycle stop */
+ spi_sh_write(ss, 0x00, SPI_SH_CR1); /* CR1 init */
+ spi_sh_write(ss, 0x00, SPI_SH_CR3); /* CR3 init */
+
+ clear_fifo(ss);
+
+ /* 1/8 clock */
+ spi_sh_write(ss, spi_sh_read(ss, SPI_SH_CR2) | 0x07, SPI_SH_CR2);
+ udelay(10);
+
+ return 0;
+}
+
+static int spi_sh_transfer(struct spi_device *spi, struct spi_message *mesg)
+{
+ struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+ unsigned long flags;
+
+ pr_debug("%s: enter\n", __func__);
+ pr_debug("\tmode = %02x\n", spi->mode);
+
+ spin_lock_irqsave(&ss->lock, flags);
+
+ mesg->actual_length = 0;
+ mesg->status = -EINPROGRESS;
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA, SPI_SH_CR1);
+
+ list_add_tail(&mesg->queue, &ss->queue);
+ queue_work(ss->workqueue, &ss->ws);
+
+ spin_unlock_irqrestore(&ss->lock, flags);
+
+ return 0;
+}
+
+static void spi_sh_cleanup(struct spi_device *spi)
+{
+ struct spi_sh_data *ss = spi_master_get_devdata(spi->master);
+
+ pr_debug("%s: enter\n", __func__);
+
+ spi_sh_clear_bit(ss, SPI_SH_SSA | SPI_SH_SSDB | SPI_SH_SSD,
+ SPI_SH_CR1);
+}
+
+static irqreturn_t spi_sh_irq(int irq, void *_ss)
+{
+ struct spi_sh_data *ss = (struct spi_sh_data *)_ss;
+ unsigned long cr1;
+
+ cr1 = spi_sh_read(ss, SPI_SH_CR1);
+ if (cr1 & SPI_SH_TBE)
+ ss->cr1 |= SPI_SH_TBE;
+ if (cr1 & SPI_SH_TBF)
+ ss->cr1 |= SPI_SH_TBF;
+ if (cr1 & SPI_SH_RBE)
+ ss->cr1 |= SPI_SH_RBE;
+ if (cr1 & SPI_SH_RBF)
+ ss->cr1 |= SPI_SH_RBF;
+
+ if (ss->cr1) {
+ spi_sh_clear_bit(ss, ss->cr1, SPI_SH_CR4);
+ wake_up(&ss->wait);
+ }
+
+ return IRQ_HANDLED;
+}
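+/*
+ * Editor's note: spi_sh_send()/spi_sh_receive() and this handler share
+ * a simple handshake: the waiter clears a flag in the ss->cr1 shadow,
+ * enables the matching interrupt in CR4 and sleeps on ss->wait; the
+ * handler copies the raised status bits into ss->cr1, masks them in
+ * CR4 and wakes the waiter.
+ */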
+
+static int spi_sh_remove(struct platform_device *pdev)
+{
+ struct spi_sh_data *ss = platform_get_drvdata(pdev);
+
+ spi_unregister_master(ss->master);
+ destroy_workqueue(ss->workqueue);
+ free_irq(ss->irq, ss);
+
+ return 0;
+}
+
+static int spi_sh_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ struct spi_master *master;
+ struct spi_sh_data *ss;
+ int ret, irq;
+
+ /* get base addr */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (unlikely(res == NULL)) {
+ dev_err(&pdev->dev, "invalid resource\n");
+ return -EINVAL;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "platform_get_irq error\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct spi_sh_data));
+ if (master == NULL) {
+ dev_err(&pdev->dev, "spi_alloc_master error.\n");
+ return -ENOMEM;
+ }
+
+ ss = spi_master_get_devdata(master);
+ platform_set_drvdata(pdev, ss);
+
+ switch (res->flags & IORESOURCE_MEM_TYPE_MASK) {
+ case IORESOURCE_MEM_8BIT:
+ ss->width = 8;
+ break;
+ case IORESOURCE_MEM_32BIT:
+ ss->width = 32;
+ break;
+ default:
+ dev_err(&pdev->dev, "No support width\n");
+ ret = -ENODEV;
+ goto error1;
+ }
+ ss->irq = irq;
+ ss->master = master;
+ ss->addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (ss->addr == NULL) {
+ dev_err(&pdev->dev, "ioremap error.\n");
+ ret = -ENOMEM;
+ goto error1;
+ }
+ INIT_LIST_HEAD(&ss->queue);
+ spin_lock_init(&ss->lock);
+ INIT_WORK(&ss->ws, spi_sh_work);
+ init_waitqueue_head(&ss->wait);
+ ss->workqueue = create_singlethread_workqueue(
+ dev_name(master->dev.parent));
+ if (ss->workqueue == NULL) {
+ dev_err(&pdev->dev, "create workqueue error\n");
+ ret = -EBUSY;
+ goto error1;
+ }
+
+ ret = request_irq(irq, spi_sh_irq, 0, "spi_sh", ss);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "request_irq error\n");
+ goto error2;
+ }
+
+ master->num_chipselect = 2;
+ master->bus_num = pdev->id;
+ master->setup = spi_sh_setup;
+ master->transfer = spi_sh_transfer;
+ master->cleanup = spi_sh_cleanup;
+
+ ret = spi_register_master(master);
+ if (ret < 0) {
+ printk(KERN_ERR "spi_register_master error.\n");
+ goto error3;
+ }
+
+ return 0;
+
+ error3:
+ free_irq(irq, ss);
+ error2:
+ destroy_workqueue(ss->workqueue);
+ error1:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static struct platform_driver spi_sh_driver = {
+ .probe = spi_sh_probe,
+ .remove = spi_sh_remove,
+ .driver = {
+ .name = "sh_spi",
+ },
+};
+module_platform_driver(spi_sh_driver);
+
+MODULE_DESCRIPTION("SH SPI bus driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yoshihiro Shimoda");
+MODULE_ALIAS("platform:sh_spi");
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
new file mode 100644
index 000000000..f5715c9f6
--- /dev/null
+++ b/drivers/spi/spi-sirf.c
@@ -0,0 +1,838 @@
+/*
+ * SPI bus driver for CSR SiRFprimaII
+ *
+ * Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
+ *
+ * Licensed under GPLv2 or later.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/bitops.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-direction.h>
+#include <linux/dma-mapping.h>
+#include <linux/reset.h>
+
+#define DRIVER_NAME "sirfsoc_spi"
+
+#define SIRFSOC_SPI_CTRL 0x0000
+#define SIRFSOC_SPI_CMD 0x0004
+#define SIRFSOC_SPI_TX_RX_EN 0x0008
+#define SIRFSOC_SPI_INT_EN 0x000C
+#define SIRFSOC_SPI_INT_STATUS 0x0010
+#define SIRFSOC_SPI_TX_DMA_IO_CTRL 0x0100
+#define SIRFSOC_SPI_TX_DMA_IO_LEN 0x0104
+#define SIRFSOC_SPI_TXFIFO_CTRL 0x0108
+#define SIRFSOC_SPI_TXFIFO_LEVEL_CHK 0x010C
+#define SIRFSOC_SPI_TXFIFO_OP 0x0110
+#define SIRFSOC_SPI_TXFIFO_STATUS 0x0114
+#define SIRFSOC_SPI_TXFIFO_DATA 0x0118
+#define SIRFSOC_SPI_RX_DMA_IO_CTRL 0x0120
+#define SIRFSOC_SPI_RX_DMA_IO_LEN 0x0124
+#define SIRFSOC_SPI_RXFIFO_CTRL 0x0128
+#define SIRFSOC_SPI_RXFIFO_LEVEL_CHK 0x012C
+#define SIRFSOC_SPI_RXFIFO_OP 0x0130
+#define SIRFSOC_SPI_RXFIFO_STATUS 0x0134
+#define SIRFSOC_SPI_RXFIFO_DATA 0x0138
+#define SIRFSOC_SPI_DUMMY_DELAY_CTL 0x0144
+
+/* SPI CTRL register defines */
+#define SIRFSOC_SPI_SLV_MODE BIT(16)
+#define SIRFSOC_SPI_CMD_MODE BIT(17)
+#define SIRFSOC_SPI_CS_IO_OUT BIT(18)
+#define SIRFSOC_SPI_CS_IO_MODE BIT(19)
+#define SIRFSOC_SPI_CLK_IDLE_STAT BIT(20)
+#define SIRFSOC_SPI_CS_IDLE_STAT BIT(21)
+#define SIRFSOC_SPI_TRAN_MSB BIT(22)
+#define SIRFSOC_SPI_DRV_POS_EDGE BIT(23)
+#define SIRFSOC_SPI_CS_HOLD_TIME BIT(24)
+#define SIRFSOC_SPI_CLK_SAMPLE_MODE BIT(25)
+#define SIRFSOC_SPI_TRAN_DAT_FORMAT_8 (0 << 26)
+#define SIRFSOC_SPI_TRAN_DAT_FORMAT_12 (1 << 26)
+#define SIRFSOC_SPI_TRAN_DAT_FORMAT_16 (2 << 26)
+#define SIRFSOC_SPI_TRAN_DAT_FORMAT_32 (3 << 26)
+#define SIRFSOC_SPI_CMD_BYTE_NUM(x) ((x & 3) << 28)
+#define SIRFSOC_SPI_ENA_AUTO_CLR BIT(30)
+#define SIRFSOC_SPI_MUL_DAT_MODE BIT(31)
+
+/* Interrupt Enable */
+#define SIRFSOC_SPI_RX_DONE_INT_EN BIT(0)
+#define SIRFSOC_SPI_TX_DONE_INT_EN BIT(1)
+#define SIRFSOC_SPI_RX_OFLOW_INT_EN BIT(2)
+#define SIRFSOC_SPI_TX_UFLOW_INT_EN BIT(3)
+#define SIRFSOC_SPI_RX_IO_DMA_INT_EN BIT(4)
+#define SIRFSOC_SPI_TX_IO_DMA_INT_EN BIT(5)
+#define SIRFSOC_SPI_RXFIFO_FULL_INT_EN BIT(6)
+#define SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN BIT(7)
+#define SIRFSOC_SPI_RXFIFO_THD_INT_EN BIT(8)
+#define SIRFSOC_SPI_TXFIFO_THD_INT_EN BIT(9)
+#define SIRFSOC_SPI_FRM_END_INT_EN BIT(10)
+
+#define SIRFSOC_SPI_INT_MASK_ALL 0x1FFF
+
+/* Interrupt status */
+#define SIRFSOC_SPI_RX_DONE BIT(0)
+#define SIRFSOC_SPI_TX_DONE BIT(1)
+#define SIRFSOC_SPI_RX_OFLOW BIT(2)
+#define SIRFSOC_SPI_TX_UFLOW BIT(3)
+#define SIRFSOC_SPI_RX_IO_DMA BIT(4)
+#define SIRFSOC_SPI_RX_FIFO_FULL BIT(6)
+#define SIRFSOC_SPI_TXFIFO_EMPTY BIT(7)
+#define SIRFSOC_SPI_RXFIFO_THD_REACH BIT(8)
+#define SIRFSOC_SPI_TXFIFO_THD_REACH BIT(9)
+#define SIRFSOC_SPI_FRM_END BIT(10)
+
+/* TX RX enable */
+#define SIRFSOC_SPI_RX_EN BIT(0)
+#define SIRFSOC_SPI_TX_EN BIT(1)
+#define SIRFSOC_SPI_CMD_TX_EN BIT(2)
+
+#define SIRFSOC_SPI_IO_MODE_SEL BIT(0)
+#define SIRFSOC_SPI_RX_DMA_FLUSH BIT(2)
+
+/* FIFO OPs */
+#define SIRFSOC_SPI_FIFO_RESET BIT(0)
+#define SIRFSOC_SPI_FIFO_START BIT(1)
+
+/* FIFO CTRL */
+#define SIRFSOC_SPI_FIFO_WIDTH_BYTE (0 << 0)
+#define SIRFSOC_SPI_FIFO_WIDTH_WORD (1 << 0)
+#define SIRFSOC_SPI_FIFO_WIDTH_DWORD (2 << 0)
+
+/* FIFO Status */
+#define SIRFSOC_SPI_FIFO_LEVEL_MASK 0xFF
+#define SIRFSOC_SPI_FIFO_FULL BIT(8)
+#define SIRFSOC_SPI_FIFO_EMPTY BIT(9)
+
+/* 256 bytes rx/tx FIFO */
+#define SIRFSOC_SPI_FIFO_SIZE 256
+#define SIRFSOC_SPI_DAT_FRM_LEN_MAX (64 * 1024)
+
+#define SIRFSOC_SPI_FIFO_SC(x) ((x) & 0x3F)
+#define SIRFSOC_SPI_FIFO_LC(x) (((x) & 0x3F) << 10)
+#define SIRFSOC_SPI_FIFO_HC(x) (((x) & 0x3F) << 20)
+#define SIRFSOC_SPI_FIFO_THD(x) (((x) & 0xFF) << 2)
+
+/*
+ * Use DMA only when the rx/tx buffers and the transfer size are all
+ * 4-byte aligned (and the length stays below 2 * PAGE_SIZE), due to
+ * limitations of the DMA controller.
+ */
+
+#define ALIGNED(x) (!((u32)x & 0x3))
+#define IS_DMA_VALID(x) (x && ALIGNED(x->tx_buf) && ALIGNED(x->rx_buf) && \
+ ALIGNED(x->len) && (x->len < 2 * PAGE_SIZE))
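+/*
+ * Illustrative example (editor's note): a 6-byte transfer fails
+ * ALIGNED(x->len) and falls back to PIO even if both buffers are
+ * word aligned; likewise anything of 2 * PAGE_SIZE bytes or more is
+ * handled by PIO.
+ */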
+
+#define SIRFSOC_MAX_CMD_BYTES 4
+#define SIRFSOC_SPI_DEFAULT_FRQ 1000000
+
+struct sirfsoc_spi {
+ struct spi_bitbang bitbang;
+ struct completion rx_done;
+ struct completion tx_done;
+
+ void __iomem *base;
+ u32 ctrl_freq; /* SPI controller clock speed */
+ struct clk *clk;
+
+ /* rx & tx bufs from the spi_transfer */
+ const void *tx;
+ void *rx;
+
+ /* place received word into rx buffer */
+ void (*rx_word) (struct sirfsoc_spi *);
+ /* get word from tx buffer for sending */
+ void (*tx_word) (struct sirfsoc_spi *);
+
+ /* number of words left to be transmitted/received */
+ unsigned int left_tx_word;
+ unsigned int left_rx_word;
+
+ /* rx & tx DMA channels */
+ struct dma_chan *rx_chan;
+ struct dma_chan *tx_chan;
+ dma_addr_t src_start;
+ dma_addr_t dst_start;
+ void *dummypage;
+ int word_width; /* in bytes */
+
+ /*
+ * if tx size is not more than 4 and rx size is NULL, use
+ * command model
+ */
+ bool tx_by_cmd;
+ bool hw_cs;
+};
+
+static void spi_sirfsoc_rx_word_u8(struct sirfsoc_spi *sspi)
+{
+ u32 data;
+ u8 *rx = sspi->rx;
+
+ data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
+
+ if (rx) {
+ *rx++ = (u8) data;
+ sspi->rx = rx;
+ }
+
+ sspi->left_rx_word--;
+}
+
+static void spi_sirfsoc_tx_word_u8(struct sirfsoc_spi *sspi)
+{
+ u32 data = 0;
+ const u8 *tx = sspi->tx;
+
+ if (tx) {
+ data = *tx++;
+ sspi->tx = tx;
+ }
+
+ writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
+ sspi->left_tx_word--;
+}
+
+static void spi_sirfsoc_rx_word_u16(struct sirfsoc_spi *sspi)
+{
+ u32 data;
+ u16 *rx = sspi->rx;
+
+ data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
+
+ if (rx) {
+ *rx++ = (u16) data;
+ sspi->rx = rx;
+ }
+
+ sspi->left_rx_word--;
+}
+
+static void spi_sirfsoc_tx_word_u16(struct sirfsoc_spi *sspi)
+{
+ u32 data = 0;
+ const u16 *tx = sspi->tx;
+
+ if (tx) {
+ data = *tx++;
+ sspi->tx = tx;
+ }
+
+ writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
+ sspi->left_tx_word--;
+}
+
+static void spi_sirfsoc_rx_word_u32(struct sirfsoc_spi *sspi)
+{
+ u32 data;
+ u32 *rx = sspi->rx;
+
+ data = readl(sspi->base + SIRFSOC_SPI_RXFIFO_DATA);
+
+ if (rx) {
+ *rx++ = (u32) data;
+ sspi->rx = rx;
+ }
+
+ sspi->left_rx_word--;
+
+}
+
+static void spi_sirfsoc_tx_word_u32(struct sirfsoc_spi *sspi)
+{
+ u32 data = 0;
+ const u32 *tx = sspi->tx;
+
+ if (tx) {
+ data = *tx++;
+ sspi->tx = tx;
+ }
+
+ writel(data, sspi->base + SIRFSOC_SPI_TXFIFO_DATA);
+ sspi->left_tx_word--;
+}
+
+static irqreturn_t spi_sirfsoc_irq(int irq, void *dev_id)
+{
+ struct sirfsoc_spi *sspi = dev_id;
+ u32 spi_stat = readl(sspi->base + SIRFSOC_SPI_INT_STATUS);
+ if (sspi->tx_by_cmd && (spi_stat & SIRFSOC_SPI_FRM_END)) {
+ complete(&sspi->tx_done);
+ writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_INT_MASK_ALL,
+ sspi->base + SIRFSOC_SPI_INT_STATUS);
+ return IRQ_HANDLED;
+ }
+
+ /* Error Conditions */
+ if (spi_stat & SIRFSOC_SPI_RX_OFLOW ||
+ spi_stat & SIRFSOC_SPI_TX_UFLOW) {
+ complete(&sspi->tx_done);
+ complete(&sspi->rx_done);
+ writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_INT_MASK_ALL,
+ sspi->base + SIRFSOC_SPI_INT_STATUS);
+ return IRQ_HANDLED;
+ }
+ if (spi_stat & SIRFSOC_SPI_TXFIFO_EMPTY)
+ complete(&sspi->tx_done);
+ while (!(readl(sspi->base + SIRFSOC_SPI_INT_STATUS) &
+ SIRFSOC_SPI_RX_IO_DMA))
+ cpu_relax();
+ complete(&sspi->rx_done);
+ writel(0x0, sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_INT_MASK_ALL,
+ sspi->base + SIRFSOC_SPI_INT_STATUS);
+
+ return IRQ_HANDLED;
+}
+
+static void spi_sirfsoc_dma_fini_callback(void *data)
+{
+ struct completion *dma_complete = data;
+
+ complete(dma_complete);
+}
+
+static void spi_sirfsoc_cmd_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct sirfsoc_spi *sspi;
+ int timeout = t->len * 10;
+ u32 cmd;
+
+ sspi = spi_master_get_devdata(spi->master);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ memcpy(&cmd, sspi->tx, t->len);
+ if (sspi->word_width == 1 && !(spi->mode & SPI_LSB_FIRST))
+ cmd = cpu_to_be32(cmd) >>
+ ((SIRFSOC_MAX_CMD_BYTES - t->len) * 8);
+ if (sspi->word_width == 2 && t->len == 4 &&
+ (!(spi->mode & SPI_LSB_FIRST)))
+ cmd = ((cmd & 0xffff) << 16) | (cmd >> 16);
+ writel(cmd, sspi->base + SIRFSOC_SPI_CMD);
+ writel(SIRFSOC_SPI_FRM_END_INT_EN,
+ sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_CMD_TX_EN,
+ sspi->base + SIRFSOC_SPI_TX_RX_EN);
+ if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
+ dev_err(&spi->dev, "cmd transfer timeout\n");
+ return;
+ }
+ sspi->left_rx_word -= t->len;
+}
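+/*
+ * Worked example (editor's note): for a 2-byte MSB-first command
+ * {0x12, 0x34} with word_width 1, memcpy() yields cmd = ...3412 on a
+ * little-endian CPU; cpu_to_be32() moves the payload to the top bits
+ * (0x1234....) and the shift by (4 - 2) * 8 bits leaves 0x1234, the
+ * order the CMD register expects.
+ */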
+
+static void spi_sirfsoc_dma_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct sirfsoc_spi *sspi;
+ struct dma_async_tx_descriptor *rx_desc, *tx_desc;
+ int timeout = t->len * 10;
+
+ sspi = spi_master_get_devdata(spi->master);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_INT_MASK_ALL, sspi->base + SIRFSOC_SPI_INT_STATUS);
+ if (sspi->left_tx_word < SIRFSOC_SPI_DAT_FRM_LEN_MAX) {
+ writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
+ SIRFSOC_SPI_ENA_AUTO_CLR | SIRFSOC_SPI_MUL_DAT_MODE,
+ sspi->base + SIRFSOC_SPI_CTRL);
+ writel(sspi->left_tx_word - 1,
+ sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
+ writel(sspi->left_tx_word - 1,
+ sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ } else {
+ writel(readl(sspi->base + SIRFSOC_SPI_CTRL),
+ sspi->base + SIRFSOC_SPI_CTRL);
+ writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
+ writel(0, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ }
+ sspi->dst_start = dma_map_single(&spi->dev, sspi->rx, t->len,
+ (t->tx_buf != t->rx_buf) ?
+ DMA_FROM_DEVICE : DMA_BIDIRECTIONAL);
+ rx_desc = dmaengine_prep_slave_single(sspi->rx_chan,
+ sspi->dst_start, t->len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ rx_desc->callback = spi_sirfsoc_dma_fini_callback;
+ rx_desc->callback_param = &sspi->rx_done;
+
+ sspi->src_start = dma_map_single(&spi->dev, (void *)sspi->tx, t->len,
+ (t->tx_buf != t->rx_buf) ?
+ DMA_TO_DEVICE : DMA_BIDIRECTIONAL);
+ tx_desc = dmaengine_prep_slave_single(sspi->tx_chan,
+ sspi->src_start, t->len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ tx_desc->callback = spi_sirfsoc_dma_fini_callback;
+ tx_desc->callback_param = &sspi->tx_done;
+
+ dmaengine_submit(tx_desc);
+ dmaengine_submit(rx_desc);
+ dma_async_issue_pending(sspi->tx_chan);
+ dma_async_issue_pending(sspi->rx_chan);
+ writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
+ sspi->base + SIRFSOC_SPI_TX_RX_EN);
+ if (wait_for_completion_timeout(&sspi->rx_done, timeout) == 0) {
+ dev_err(&spi->dev, "transfer timeout\n");
+ dmaengine_terminate_all(sspi->rx_chan);
+ } else
+ sspi->left_rx_word = 0;
+ /*
+ * We only wait for the tx-done event when transferring by DMA; for
+ * PIO, rx data is produced by writing tx data, so once rx is done,
+ * tx has already finished.
+ */
+ if (wait_for_completion_timeout(&sspi->tx_done, timeout) == 0) {
+ dev_err(&spi->dev, "transfer timeout\n");
+ dmaengine_terminate_all(sspi->tx_chan);
+ }
+ dma_unmap_single(&spi->dev, sspi->src_start, t->len, DMA_TO_DEVICE);
+ dma_unmap_single(&spi->dev, sspi->dst_start, t->len, DMA_FROM_DEVICE);
+ /* TX, RX FIFO stop */
+ writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ if (sspi->left_tx_word >= SIRFSOC_SPI_DAT_FRM_LEN_MAX)
+ writel(0, sspi->base + SIRFSOC_SPI_TX_RX_EN);
+}
+
+static void spi_sirfsoc_pio_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct sirfsoc_spi *sspi;
+ int timeout = t->len * 10;
+
+ sspi = spi_master_get_devdata(spi->master);
+ do {
+ writel(SIRFSOC_SPI_FIFO_RESET,
+ sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_RESET,
+ sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START,
+ sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(0, sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_INT_MASK_ALL,
+ sspi->base + SIRFSOC_SPI_INT_STATUS);
+ writel(readl(sspi->base + SIRFSOC_SPI_CTRL) |
+ SIRFSOC_SPI_MUL_DAT_MODE | SIRFSOC_SPI_ENA_AUTO_CLR,
+ sspi->base + SIRFSOC_SPI_CTRL);
+ writel(min(sspi->left_tx_word, (u32)(256 / sspi->word_width))
+ - 1, sspi->base + SIRFSOC_SPI_TX_DMA_IO_LEN);
+ writel(min(sspi->left_rx_word, (u32)(256 / sspi->word_width))
+ - 1, sspi->base + SIRFSOC_SPI_RX_DMA_IO_LEN);
+ while (!((readl(sspi->base + SIRFSOC_SPI_TXFIFO_STATUS)
+ & SIRFSOC_SPI_FIFO_FULL)) && sspi->left_tx_word)
+ sspi->tx_word(sspi);
+ writel(SIRFSOC_SPI_TXFIFO_EMPTY_INT_EN |
+ SIRFSOC_SPI_TX_UFLOW_INT_EN |
+ SIRFSOC_SPI_RX_OFLOW_INT_EN |
+ SIRFSOC_SPI_RX_IO_DMA_INT_EN,
+ sspi->base + SIRFSOC_SPI_INT_EN);
+ writel(SIRFSOC_SPI_RX_EN | SIRFSOC_SPI_TX_EN,
+ sspi->base + SIRFSOC_SPI_TX_RX_EN);
+ if (!wait_for_completion_timeout(&sspi->tx_done, timeout) ||
+ !wait_for_completion_timeout(&sspi->rx_done, timeout)) {
+ dev_err(&spi->dev, "transfer timeout\n");
+ break;
+ }
+ while (!((readl(sspi->base + SIRFSOC_SPI_RXFIFO_STATUS)
+ & SIRFSOC_SPI_FIFO_EMPTY)) && sspi->left_rx_word)
+ sspi->rx_word(sspi);
+ writel(0, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(0, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ } while (sspi->left_tx_word != 0 || sspi->left_rx_word != 0);
+}
+
+static int spi_sirfsoc_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct sirfsoc_spi *sspi;
+ sspi = spi_master_get_devdata(spi->master);
+
+ sspi->tx = t->tx_buf ? t->tx_buf : sspi->dummypage;
+ sspi->rx = t->rx_buf ? t->rx_buf : sspi->dummypage;
+ sspi->left_tx_word = sspi->left_rx_word = t->len / sspi->word_width;
+ reinit_completion(&sspi->rx_done);
+ reinit_completion(&sspi->tx_done);
+ /*
+ * If the data goes out through the command register (tx only, with
+ * rx_buf NULL), just fill the command data into the command
+ * register and wait for its completion.
+ */
+ if (sspi->tx_by_cmd)
+ spi_sirfsoc_cmd_transfer(spi, t);
+ else if (IS_DMA_VALID(t))
+ spi_sirfsoc_dma_transfer(spi, t);
+ else
+ spi_sirfsoc_pio_transfer(spi, t);
+
+ return t->len - sspi->left_rx_word * sspi->word_width;
+}
+
+static void spi_sirfsoc_chipselect(struct spi_device *spi, int value)
+{
+ struct sirfsoc_spi *sspi = spi_master_get_devdata(spi->master);
+
+ if (sspi->hw_cs) {
+ u32 regval = readl(sspi->base + SIRFSOC_SPI_CTRL);
+ switch (value) {
+ case BITBANG_CS_ACTIVE:
+ if (spi->mode & SPI_CS_HIGH)
+ regval |= SIRFSOC_SPI_CS_IO_OUT;
+ else
+ regval &= ~SIRFSOC_SPI_CS_IO_OUT;
+ break;
+ case BITBANG_CS_INACTIVE:
+ if (spi->mode & SPI_CS_HIGH)
+ regval &= ~SIRFSOC_SPI_CS_IO_OUT;
+ else
+ regval |= SIRFSOC_SPI_CS_IO_OUT;
+ break;
+ }
+ writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
+ } else {
+ switch (value) {
+ case BITBANG_CS_ACTIVE:
+ gpio_direction_output(spi->cs_gpio,
+ spi->mode & SPI_CS_HIGH ? 1 : 0);
+ break;
+ case BITBANG_CS_INACTIVE:
+ gpio_direction_output(spi->cs_gpio,
+ spi->mode & SPI_CS_HIGH ? 0 : 1);
+ break;
+ }
+ }
+}
+
+static int
+spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct sirfsoc_spi *sspi;
+ u8 bits_per_word = 0;
+ int hz = 0;
+ u32 regval;
+ u32 txfifo_ctrl, rxfifo_ctrl;
+ u32 fifo_size = SIRFSOC_SPI_FIFO_SIZE / 4;
+
+ sspi = spi_master_get_devdata(spi->master);
+
+ bits_per_word = (t) ? t->bits_per_word : spi->bits_per_word;
+ hz = t && t->speed_hz ? t->speed_hz : spi->max_speed_hz;
+
+ regval = (sspi->ctrl_freq / (2 * hz)) - 1;
+ if (regval > 0xFFFF) {
+ dev_err(&spi->dev, "Speed %d not supported\n", hz);
+ return -EINVAL;
+ }
+
+ switch (bits_per_word) {
+ case 8:
+ regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_8;
+ sspi->rx_word = spi_sirfsoc_rx_word_u8;
+ sspi->tx_word = spi_sirfsoc_tx_word_u8;
+ break;
+ case 12:
+ case 16:
+ regval |= (bits_per_word == 12) ?
+ SIRFSOC_SPI_TRAN_DAT_FORMAT_12 :
+ SIRFSOC_SPI_TRAN_DAT_FORMAT_16;
+ sspi->rx_word = spi_sirfsoc_rx_word_u16;
+ sspi->tx_word = spi_sirfsoc_tx_word_u16;
+ break;
+ case 32:
+ regval |= SIRFSOC_SPI_TRAN_DAT_FORMAT_32;
+ sspi->rx_word = spi_sirfsoc_rx_word_u32;
+ sspi->tx_word = spi_sirfsoc_tx_word_u32;
+ break;
+ default:
+ BUG();
+ }
+
+ sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
+ txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ (sspi->word_width >> 1);
+ rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
+ (sspi->word_width >> 1);
+
+ if (!(spi->mode & SPI_CS_HIGH))
+ regval |= SIRFSOC_SPI_CS_IDLE_STAT;
+ if (!(spi->mode & SPI_LSB_FIRST))
+ regval |= SIRFSOC_SPI_TRAN_MSB;
+ if (spi->mode & SPI_CPOL)
+ regval |= SIRFSOC_SPI_CLK_IDLE_STAT;
+
+ /*
+ * Data should be driven at least 1/2 cycle before the fetch edge
+ * to make sure that data gets stable at the fetch edge.
+ */
+ if (((spi->mode & SPI_CPOL) && (spi->mode & SPI_CPHA)) ||
+ (!(spi->mode & SPI_CPOL) && !(spi->mode & SPI_CPHA)))
+ regval &= ~SIRFSOC_SPI_DRV_POS_EDGE;
+ else
+ regval |= SIRFSOC_SPI_DRV_POS_EDGE;
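+ /*
+ * Editor's note: modes 0 and 3 sample data on the rising clock
+ * edge, so the block above clears SIRFSOC_SPI_DRV_POS_EDGE to drive
+ * data on the falling edge, half a cycle ahead of the sample point;
+ * modes 1 and 2 get the opposite polarity.
+ */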
+
+ writel(SIRFSOC_SPI_FIFO_SC(fifo_size - 2) |
+ SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
+ SIRFSOC_SPI_FIFO_HC(2),
+ sspi->base + SIRFSOC_SPI_TXFIFO_LEVEL_CHK);
+ writel(SIRFSOC_SPI_FIFO_SC(2) |
+ SIRFSOC_SPI_FIFO_LC(fifo_size / 2) |
+ SIRFSOC_SPI_FIFO_HC(fifo_size - 2),
+ sspi->base + SIRFSOC_SPI_RXFIFO_LEVEL_CHK);
+ writel(txfifo_ctrl, sspi->base + SIRFSOC_SPI_TXFIFO_CTRL);
+ writel(rxfifo_ctrl, sspi->base + SIRFSOC_SPI_RXFIFO_CTRL);
+
+ if (t && t->tx_buf && !t->rx_buf && (t->len <= SIRFSOC_MAX_CMD_BYTES)) {
+ regval |= (SIRFSOC_SPI_CMD_BYTE_NUM((t->len - 1)) |
+ SIRFSOC_SPI_CMD_MODE);
+ sspi->tx_by_cmd = true;
+ } else {
+ regval &= ~SIRFSOC_SPI_CMD_MODE;
+ sspi->tx_by_cmd = false;
+ }
+ /*
+ * Hardware CS mode must never be selected here, because in that
+ * mode the CS signal cannot be controlled by the driver.
+ */
+ regval |= SIRFSOC_SPI_CS_IO_MODE;
+ writel(regval, sspi->base + SIRFSOC_SPI_CTRL);
+
+ if (IS_DMA_VALID(t)) {
+ /* Enable DMA mode for RX, TX */
+ writel(0, sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
+ writel(SIRFSOC_SPI_RX_DMA_FLUSH,
+ sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
+ } else {
+ /* Enable IO mode for RX, TX */
+ writel(SIRFSOC_SPI_IO_MODE_SEL,
+ sspi->base + SIRFSOC_SPI_TX_DMA_IO_CTRL);
+ writel(SIRFSOC_SPI_IO_MODE_SEL,
+ sspi->base + SIRFSOC_SPI_RX_DMA_IO_CTRL);
+ }
+
+ return 0;
+}
+
+static int spi_sirfsoc_setup(struct spi_device *spi)
+{
+ struct sirfsoc_spi *sspi;
+
+ sspi = spi_master_get_devdata(spi->master);
+
+ if (spi->cs_gpio == -ENOENT)
+ sspi->hw_cs = true;
+ else
+ sspi->hw_cs = false;
+ return spi_sirfsoc_setup_transfer(spi, NULL);
+}
+
+static int spi_sirfsoc_probe(struct platform_device *pdev)
+{
+ struct sirfsoc_spi *sspi;
+ struct spi_master *master;
+ struct resource *mem_res;
+ int irq;
+ int i, ret;
+
+ ret = device_reset(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "SPI reset failed!\n");
+ return ret;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*sspi));
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI master\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sspi->base = devm_ioremap_resource(&pdev->dev, mem_res);
+ if (IS_ERR(sspi->base)) {
+ ret = PTR_ERR(sspi->base);
+ goto free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = -ENXIO;
+ goto free_master;
+ }
+ ret = devm_request_irq(&pdev->dev, irq, spi_sirfsoc_irq, 0,
+ DRIVER_NAME, sspi);
+ if (ret)
+ goto free_master;
+
+ sspi->bitbang.master = master;
+ sspi->bitbang.chipselect = spi_sirfsoc_chipselect;
+ sspi->bitbang.setup_transfer = spi_sirfsoc_setup_transfer;
+ sspi->bitbang.txrx_bufs = spi_sirfsoc_transfer;
+ sspi->bitbang.master->setup = spi_sirfsoc_setup;
+ master->bus_num = pdev->id;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_CS_HIGH;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(12) |
+ SPI_BPW_MASK(16) | SPI_BPW_MASK(32);
+ master->max_speed_hz = SIRFSOC_SPI_DEFAULT_FRQ;
+ sspi->bitbang.master->dev.of_node = pdev->dev.of_node;
+
+ /* request DMA channels */
+ sspi->rx_chan = dma_request_slave_channel(&pdev->dev, "rx");
+ if (!sspi->rx_chan) {
+		dev_err(&pdev->dev, "cannot allocate rx dma channel\n");
+ ret = -ENODEV;
+ goto free_master;
+ }
+ sspi->tx_chan = dma_request_slave_channel(&pdev->dev, "tx");
+ if (!sspi->tx_chan) {
+		dev_err(&pdev->dev, "cannot allocate tx dma channel\n");
+ ret = -ENODEV;
+ goto free_rx_dma;
+ }
+
+ sspi->clk = clk_get(&pdev->dev, NULL);
+ if (IS_ERR(sspi->clk)) {
+ ret = PTR_ERR(sspi->clk);
+ goto free_tx_dma;
+ }
+ clk_prepare_enable(sspi->clk);
+ sspi->ctrl_freq = clk_get_rate(sspi->clk);
+
+ init_completion(&sspi->rx_done);
+ init_completion(&sspi->tx_done);
+
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ /* We are not using dummy delay between command and data */
+ writel(0, sspi->base + SIRFSOC_SPI_DUMMY_DELAY_CTL);
+
+ sspi->dummypage = kmalloc(2 * PAGE_SIZE, GFP_KERNEL);
+ if (!sspi->dummypage) {
+ ret = -ENOMEM;
+ goto free_clk;
+ }
+
+ ret = spi_bitbang_start(&sspi->bitbang);
+ if (ret)
+ goto free_dummypage;
+ for (i = 0; master->cs_gpios && i < master->num_chipselect; i++) {
+ if (master->cs_gpios[i] == -ENOENT)
+ continue;
+ if (!gpio_is_valid(master->cs_gpios[i])) {
+ dev_err(&pdev->dev, "no valid gpio\n");
+ ret = -EINVAL;
+ goto free_dummypage;
+ }
+ ret = devm_gpio_request(&pdev->dev,
+ master->cs_gpios[i], DRIVER_NAME);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request gpio\n");
+ goto free_dummypage;
+ }
+ }
+	dev_info(&pdev->dev, "registered, bus number = %d\n", master->bus_num);
+
+ return 0;
+free_dummypage:
+ kfree(sspi->dummypage);
+free_clk:
+ clk_disable_unprepare(sspi->clk);
+ clk_put(sspi->clk);
+free_tx_dma:
+ dma_release_channel(sspi->tx_chan);
+free_rx_dma:
+ dma_release_channel(sspi->rx_chan);
+free_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int spi_sirfsoc_remove(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sirfsoc_spi *sspi;
+
+ master = platform_get_drvdata(pdev);
+ sspi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&sspi->bitbang);
+ kfree(sspi->dummypage);
+ clk_disable_unprepare(sspi->clk);
+ clk_put(sspi->clk);
+ dma_release_channel(sspi->rx_chan);
+ dma_release_channel(sspi->tx_chan);
+ spi_master_put(master);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_sirfsoc_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ clk_disable(sspi->clk);
+ return 0;
+}
+
+static int spi_sirfsoc_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sirfsoc_spi *sspi = spi_master_get_devdata(master);
+
+ clk_enable(sspi->clk);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_RESET, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_RXFIFO_OP);
+ writel(SIRFSOC_SPI_FIFO_START, sspi->base + SIRFSOC_SPI_TXFIFO_OP);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(spi_sirfsoc_pm_ops, spi_sirfsoc_suspend,
+ spi_sirfsoc_resume);
+
+static const struct of_device_id spi_sirfsoc_of_match[] = {
+ { .compatible = "sirf,prima2-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, spi_sirfsoc_of_match);
+
+static struct platform_driver spi_sirfsoc_driver = {
+ .driver = {
+ .name = DRIVER_NAME,
+ .pm = &spi_sirfsoc_pm_ops,
+ .of_match_table = spi_sirfsoc_of_match,
+ },
+ .probe = spi_sirfsoc_probe,
+ .remove = spi_sirfsoc_remove,
+};
+module_platform_driver(spi_sirfsoc_driver);
+MODULE_DESCRIPTION("SiRF SoC SPI master driver");
+MODULE_AUTHOR("Zhiwu Song <Zhiwu.Song@csr.com>");
+MODULE_AUTHOR("Barry Song <Baohua.Song@csr.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-st-ssc4.c b/drivers/spi/spi-st-ssc4.c
new file mode 100644
index 000000000..f17c0abe2
--- /dev/null
+++ b/drivers/spi/spi-st-ssc4.c
@@ -0,0 +1,504 @@
+/*
+ * Copyright (c) 2008-2014 STMicroelectronics Limited
+ *
+ * Author: Angus Clark <Angus.Clark@st.com>
+ * Patrice Chotard <patrice.chotard@st.com>
+ * Lee Jones <lee.jones@linaro.org>
+ *
+ * SPI master mode controller driver, used in STMicroelectronics devices.
+ *
+ * May be copied or modified under the terms of the GNU General Public
+ * License Version 2.0 only. See linux/COPYING for more information.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/of_gpio.h>
+#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+/* SSC registers */
+#define SSC_BRG 0x000
+#define SSC_TBUF 0x004
+#define SSC_RBUF 0x008
+#define SSC_CTL 0x00C
+#define SSC_IEN 0x010
+#define SSC_I2C 0x018
+
+/* SSC Control */
+#define SSC_CTL_DATA_WIDTH_9 0x8
+#define SSC_CTL_DATA_WIDTH_MSK 0xf
+#define SSC_CTL_BM 0xf
+#define SSC_CTL_HB BIT(4)
+#define SSC_CTL_PH BIT(5)
+#define SSC_CTL_PO BIT(6)
+#define SSC_CTL_SR BIT(7)
+#define SSC_CTL_MS BIT(8)
+#define SSC_CTL_EN BIT(9)
+#define SSC_CTL_LPB BIT(10)
+#define SSC_CTL_EN_TX_FIFO BIT(11)
+#define SSC_CTL_EN_RX_FIFO BIT(12)
+#define SSC_CTL_EN_CLST_RX BIT(13)
+
+/* SSC Interrupt Enable */
+#define SSC_IEN_TEEN BIT(2)
+
+#define FIFO_SIZE 8
+
+struct spi_st {
+ /* SSC SPI Controller */
+ void __iomem *base;
+ struct clk *clk;
+ struct device *dev;
+
+ /* SSC SPI current transaction */
+ const u8 *tx_ptr;
+ u8 *rx_ptr;
+ u16 bytes_per_word;
+ unsigned int words_remaining;
+ unsigned int baud;
+ struct completion done;
+};
+
+static int spi_st_clk_enable(struct spi_st *spi_st)
+{
+ /*
+ * Current platforms use one of the core clocks for SPI and I2C.
+ * If we attempt to disable the clock, the system will hang.
+ *
+ * TODO: Remove this when platform supports power domains.
+ */
+ return 0;
+
+ return clk_prepare_enable(spi_st->clk);
+}
+
+static void spi_st_clk_disable(struct spi_st *spi_st)
+{
+ /*
+ * Current platforms use one of the core clocks for SPI and I2C.
+ * If we attempt to disable the clock, the system will hang.
+ *
+ * TODO: Remove this when platform supports power domains.
+ */
+ return;
+
+ clk_disable_unprepare(spi_st->clk);
+}
+
+/* Load the TX FIFO */
+static void ssc_write_tx_fifo(struct spi_st *spi_st)
+{
+ unsigned int count, i;
+ uint32_t word = 0;
+
+ if (spi_st->words_remaining > FIFO_SIZE)
+ count = FIFO_SIZE;
+ else
+ count = spi_st->words_remaining;
+
+ for (i = 0; i < count; i++) {
+ if (spi_st->tx_ptr) {
+ if (spi_st->bytes_per_word == 1) {
+ word = *spi_st->tx_ptr++;
+ } else {
+ word = *spi_st->tx_ptr++;
+ word = *spi_st->tx_ptr++ | (word << 8);
+ }
+ }
+ writel_relaxed(word, spi_st->base + SSC_TBUF);
+ }
+}
+
+/* Read the RX FIFO */
+static void ssc_read_rx_fifo(struct spi_st *spi_st)
+{
+ unsigned int count, i;
+ uint32_t word = 0;
+
+ if (spi_st->words_remaining > FIFO_SIZE)
+ count = FIFO_SIZE;
+ else
+ count = spi_st->words_remaining;
+
+ for (i = 0; i < count; i++) {
+ word = readl_relaxed(spi_st->base + SSC_RBUF);
+
+ if (spi_st->rx_ptr) {
+ if (spi_st->bytes_per_word == 1) {
+ *spi_st->rx_ptr++ = (uint8_t)word;
+ } else {
+ *spi_st->rx_ptr++ = (word >> 8);
+ *spi_st->rx_ptr++ = word & 0xff;
+ }
+ }
+ }
+ spi_st->words_remaining -= count;
+}
+
+static int spi_st_transfer_one(struct spi_master *master,
+ struct spi_device *spi, struct spi_transfer *t)
+{
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+ uint32_t ctl = 0;
+
+ /* Setup transfer */
+ spi_st->tx_ptr = t->tx_buf;
+ spi_st->rx_ptr = t->rx_buf;
+
+ if (spi->bits_per_word > 8) {
+ /*
+ * Anything greater than 8 bits-per-word requires 2
+ * bytes-per-word in the RX/TX buffers
+ */
+ spi_st->bytes_per_word = 2;
+ spi_st->words_remaining = t->len / 2;
+
+ } else if (spi->bits_per_word == 8 && !(t->len & 0x1)) {
+ /*
+ * If transfer is even-length, and 8 bits-per-word, then
+ * implement as half-length 16 bits-per-word transfer
+ */
+ spi_st->bytes_per_word = 2;
+ spi_st->words_remaining = t->len / 2;
+
+ /* Set SSC_CTL to 16 bits-per-word */
+ ctl = readl_relaxed(spi_st->base + SSC_CTL);
+ writel_relaxed((ctl | 0xf), spi_st->base + SSC_CTL);
+
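+		/* A dummy read of RBUF discards stale data and clears status */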
+ readl_relaxed(spi_st->base + SSC_RBUF);
+
+ } else {
+ spi_st->bytes_per_word = 1;
+ spi_st->words_remaining = t->len;
+ }
+
+ reinit_completion(&spi_st->done);
+
+ /* Start transfer by writing to the TX FIFO */
+ ssc_write_tx_fifo(spi_st);
+ writel_relaxed(SSC_IEN_TEEN, spi_st->base + SSC_IEN);
+
+ /* Wait for transfer to complete */
+ wait_for_completion(&spi_st->done);
+
+ /* Restore SSC_CTL if necessary */
+ if (ctl)
+ writel_relaxed(ctl, spi_st->base + SSC_CTL);
+
+ spi_finalize_current_transfer(spi->master);
+
+ return t->len;
+}
+
+static void spi_st_cleanup(struct spi_device *spi)
+{
+ int cs = spi->cs_gpio;
+
+ if (gpio_is_valid(cs))
+ devm_gpio_free(&spi->dev, cs);
+}
+
+/* the spi->mode bits understood by this driver: */
+#define MODEBITS (SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP | SPI_CS_HIGH)
+static int spi_st_setup(struct spi_device *spi)
+{
+ struct spi_st *spi_st = spi_master_get_devdata(spi->master);
+ u32 spi_st_clk, sscbrg, var;
+ u32 hz = spi->max_speed_hz;
+ int cs = spi->cs_gpio;
+ int ret;
+
+ if (!hz) {
+ dev_err(&spi->dev, "max_speed_hz unspecified\n");
+ return -EINVAL;
+ }
+
+ if (!gpio_is_valid(cs)) {
+ dev_err(&spi->dev, "%d is not a valid gpio\n", cs);
+ return -EINVAL;
+ }
+
+ if (devm_gpio_request(&spi->dev, cs, dev_name(&spi->dev))) {
+ dev_err(&spi->dev, "could not request gpio:%d\n", cs);
+ return -EINVAL;
+ }
+
+ ret = gpio_direction_output(cs, spi->mode & SPI_CS_HIGH);
+ if (ret)
+ return ret;
+
+ spi_st_clk = clk_get_rate(spi_st->clk);
+
+ /* Set SSC_BRF */
+ sscbrg = spi_st_clk / (2 * hz);
+ if (sscbrg < 0x07 || sscbrg > BIT(16)) {
+		dev_err(&spi->dev,
+			"baudrate %u (sscbrg %u) outside valid range\n",
+			hz, sscbrg);
+ return -EINVAL;
+ }
+
+ spi_st->baud = spi_st_clk / (2 * sscbrg);
+ if (sscbrg == BIT(16)) /* 16-bit counter wraps */
+ sscbrg = 0x0;
+
+ writel_relaxed(sscbrg, spi_st->base + SSC_BRG);
+
+	dev_dbg(&spi->dev,
+		"setting baudrate: target=%u Hz, actual=%u Hz, sscbrg=%u\n",
+		hz, spi_st->baud, sscbrg);
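+
+	/*
+	 * Illustrative example (hypothetical rates): with spi_st_clk = 100 MHz
+	 * and hz = 6 MHz, sscbrg = 100/12 = 8 and the actual baudrate is
+	 * 100 MHz / (2 * 8) = 6.25 MHz, slightly above the request because
+	 * sscbrg is rounded down.
+	 */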
+
+ /* Set SSC_CTL and enable SSC */
+ var = readl_relaxed(spi_st->base + SSC_CTL);
+ var |= SSC_CTL_MS;
+
+ if (spi->mode & SPI_CPOL)
+ var |= SSC_CTL_PO;
+ else
+ var &= ~SSC_CTL_PO;
+
+ if (spi->mode & SPI_CPHA)
+ var |= SSC_CTL_PH;
+ else
+ var &= ~SSC_CTL_PH;
+
+ if ((spi->mode & SPI_LSB_FIRST) == 0)
+ var |= SSC_CTL_HB;
+ else
+ var &= ~SSC_CTL_HB;
+
+ if (spi->mode & SPI_LOOP)
+ var |= SSC_CTL_LPB;
+ else
+ var &= ~SSC_CTL_LPB;
+
+ var &= ~SSC_CTL_DATA_WIDTH_MSK;
+ var |= (spi->bits_per_word - 1);
+
+ var |= SSC_CTL_EN_TX_FIFO | SSC_CTL_EN_RX_FIFO;
+ var |= SSC_CTL_EN;
+
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ /* Clear the status register */
+ readl_relaxed(spi_st->base + SSC_RBUF);
+
+ return 0;
+}
+
+/* Interrupt fired when TX shift register becomes empty */
+static irqreturn_t spi_st_irq(int irq, void *dev_id)
+{
+ struct spi_st *spi_st = (struct spi_st *)dev_id;
+
+ /* Read RX FIFO */
+ ssc_read_rx_fifo(spi_st);
+
+ /* Fill TX FIFO */
+ if (spi_st->words_remaining) {
+ ssc_write_tx_fifo(spi_st);
+ } else {
+ /* TX/RX complete */
+ writel_relaxed(0x0, spi_st->base + SSC_IEN);
+		/*
+		 * Read back SSC_IEN to ensure the interrupt-disable write has
+		 * completed before interrupts can fire again.
+		 */
+ readl(spi_st->base + SSC_IEN);
+ complete(&spi_st->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int spi_st_probe(struct platform_device *pdev)
+{
+ struct device_node *np = pdev->dev.of_node;
+ struct spi_master *master;
+ struct resource *res;
+ struct spi_st *spi_st;
+ int irq, ret = 0;
+ u32 var;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*spi_st));
+ if (!master)
+ return -ENOMEM;
+
+ master->dev.of_node = np;
+ master->mode_bits = MODEBITS;
+ master->setup = spi_st_setup;
+ master->cleanup = spi_st_cleanup;
+ master->transfer_one = spi_st_transfer_one;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->auto_runtime_pm = true;
+ master->bus_num = pdev->id;
+ spi_st = spi_master_get_devdata(master);
+
+ spi_st->clk = devm_clk_get(&pdev->dev, "ssc");
+ if (IS_ERR(spi_st->clk)) {
+ dev_err(&pdev->dev, "Unable to request clock\n");
+ return PTR_ERR(spi_st->clk);
+ }
+
+ ret = spi_st_clk_enable(spi_st);
+ if (ret)
+ return ret;
+
+ init_completion(&spi_st->done);
+
+ /* Get resources */
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ spi_st->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(spi_st->base)) {
+ ret = PTR_ERR(spi_st->base);
+ goto clk_disable;
+ }
+
+ /* Disable I2C and Reset SSC */
+ writel_relaxed(0x0, spi_st->base + SSC_I2C);
+	var = readl_relaxed(spi_st->base + SSC_CTL);
+ var |= SSC_CTL_SR;
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ udelay(1);
+ var = readl_relaxed(spi_st->base + SSC_CTL);
+ var &= ~SSC_CTL_SR;
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ /* Set SSC into slave mode before reconfiguring PIO pins */
+ var = readl_relaxed(spi_st->base + SSC_CTL);
+ var &= ~SSC_CTL_MS;
+ writel_relaxed(var, spi_st->base + SSC_CTL);
+
+ irq = irq_of_parse_and_map(np, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "IRQ missing or invalid\n");
+ ret = -EINVAL;
+ goto clk_disable;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, spi_st_irq, 0,
+ pdev->name, spi_st);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to request irq %d\n", irq);
+ goto clk_disable;
+ }
+
+ /* by default the device is on */
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
+ platform_set_drvdata(pdev, master);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to register master\n");
+ goto clk_disable;
+ }
+
+ return 0;
+
+clk_disable:
+ spi_st_clk_disable(spi_st);
+
+ return ret;
+}
+
+static int spi_st_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+
+ spi_st_clk_disable(spi_st);
+
+ pinctrl_pm_select_sleep_state(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int spi_st_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+
+ writel_relaxed(0, spi_st->base + SSC_IEN);
+ pinctrl_pm_select_sleep_state(dev);
+
+ spi_st_clk_disable(spi_st);
+
+ return 0;
+}
+
+static int spi_st_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct spi_st *spi_st = spi_master_get_devdata(master);
+ int ret;
+
+ ret = spi_st_clk_enable(spi_st);
+ pinctrl_pm_select_default_state(dev);
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_PM_SLEEP
+static int spi_st_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_suspend(master);
+ if (ret)
+ return ret;
+
+ return pm_runtime_force_suspend(dev);
+}
+
+static int spi_st_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ int ret;
+
+ ret = spi_master_resume(master);
+ if (ret)
+ return ret;
+
+ return pm_runtime_force_resume(dev);
+}
+#endif
+
+static const struct dev_pm_ops spi_st_pm = {
+ SET_SYSTEM_SLEEP_PM_OPS(spi_st_suspend, spi_st_resume)
+ SET_RUNTIME_PM_OPS(spi_st_runtime_suspend, spi_st_runtime_resume, NULL)
+};
+
+static const struct of_device_id stm_spi_match[] = {
+ { .compatible = "st,comms-ssc4-spi", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, stm_spi_match);
+
+static struct platform_driver spi_st_driver = {
+ .driver = {
+ .name = "spi-st",
+ .pm = &spi_st_pm,
+ .of_match_table = of_match_ptr(stm_spi_match),
+ },
+ .probe = spi_st_probe,
+ .remove = spi_st_remove,
+};
+module_platform_driver(spi_st_driver);
+
+MODULE_AUTHOR("Patrice Chotard <patrice.chotard@st.com>");
+MODULE_DESCRIPTION("STM SSC SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-sun4i.c b/drivers/spi/spi-sun4i.c
new file mode 100644
index 000000000..fbb0a4d74
--- /dev/null
+++ b/drivers/spi/spi-sun4i.c
@@ -0,0 +1,476 @@
+/*
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include <linux/spi/spi.h>
+
+#define SUN4I_FIFO_DEPTH 64
+
+#define SUN4I_RXDATA_REG 0x00
+
+#define SUN4I_TXDATA_REG 0x04
+
+#define SUN4I_CTL_REG 0x08
+#define SUN4I_CTL_ENABLE BIT(0)
+#define SUN4I_CTL_MASTER BIT(1)
+#define SUN4I_CTL_CPHA BIT(2)
+#define SUN4I_CTL_CPOL BIT(3)
+#define SUN4I_CTL_CS_ACTIVE_LOW BIT(4)
+#define SUN4I_CTL_LMTF BIT(6)
+#define SUN4I_CTL_TF_RST BIT(8)
+#define SUN4I_CTL_RF_RST BIT(9)
+#define SUN4I_CTL_XCH BIT(10)
+#define SUN4I_CTL_CS_MASK 0x3000
+#define SUN4I_CTL_CS(cs) (((cs) << 12) & SUN4I_CTL_CS_MASK)
+#define SUN4I_CTL_DHB BIT(15)
+#define SUN4I_CTL_CS_MANUAL BIT(16)
+#define SUN4I_CTL_CS_LEVEL BIT(17)
+#define SUN4I_CTL_TP BIT(18)
+
+#define SUN4I_INT_CTL_REG 0x0c
+#define SUN4I_INT_CTL_TC BIT(16)
+
+#define SUN4I_INT_STA_REG 0x10
+
+#define SUN4I_DMA_CTL_REG 0x14
+
+#define SUN4I_WAIT_REG 0x18
+
+#define SUN4I_CLK_CTL_REG 0x1c
+#define SUN4I_CLK_CTL_CDR2_MASK 0xff
+#define SUN4I_CLK_CTL_CDR2(div) ((div) & SUN4I_CLK_CTL_CDR2_MASK)
+#define SUN4I_CLK_CTL_CDR1_MASK 0xf
+#define SUN4I_CLK_CTL_CDR1(div) (((div) & SUN4I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN4I_CLK_CTL_DRS BIT(12)
+
+#define SUN4I_BURST_CNT_REG 0x20
+#define SUN4I_BURST_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN4I_XMIT_CNT_REG 0x24
+#define SUN4I_XMIT_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN4I_FIFO_STA_REG 0x28
+#define SUN4I_FIFO_STA_RF_CNT_MASK 0x7f
+#define SUN4I_FIFO_STA_RF_CNT_BITS 0
+#define SUN4I_FIFO_STA_TF_CNT_MASK 0x7f
+#define SUN4I_FIFO_STA_TF_CNT_BITS 16
+
+struct sun4i_spi {
+ struct spi_master *master;
+ void __iomem *base_addr;
+ struct clk *hclk;
+ struct clk *mclk;
+
+ struct completion done;
+
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+};
+
+static inline u32 sun4i_spi_read(struct sun4i_spi *sspi, u32 reg)
+{
+ return readl(sspi->base_addr + reg);
+}
+
+static inline void sun4i_spi_write(struct sun4i_spi *sspi, u32 reg, u32 value)
+{
+ writel(value, sspi->base_addr + reg);
+}
+
+static inline void sun4i_spi_drain_fifo(struct sun4i_spi *sspi, int len)
+{
+ u32 reg, cnt;
+ u8 byte;
+
+ /* See how much data is available */
+ reg = sun4i_spi_read(sspi, SUN4I_FIFO_STA_REG);
+ reg &= SUN4I_FIFO_STA_RF_CNT_MASK;
+ cnt = reg >> SUN4I_FIFO_STA_RF_CNT_BITS;
+
+ if (len > cnt)
+ len = cnt;
+
+ while (len--) {
+ byte = readb(sspi->base_addr + SUN4I_RXDATA_REG);
+ if (sspi->rx_buf)
+ *sspi->rx_buf++ = byte;
+ }
+}
+
+static inline void sun4i_spi_fill_fifo(struct sun4i_spi *sspi, int len)
+{
+ u8 byte;
+
+ if (len > sspi->len)
+ len = sspi->len;
+
+ while (len--) {
+ byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
+ writeb(byte, sspi->base_addr + SUN4I_TXDATA_REG);
+ sspi->len--;
+ }
+}
+
+static void sun4i_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct sun4i_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+
+ reg &= ~SUN4I_CTL_CS_MASK;
+ reg |= SUN4I_CTL_CS(spi->chip_select);
+
+ if (enable)
+ reg |= SUN4I_CTL_CS_LEVEL;
+ else
+ reg &= ~SUN4I_CTL_CS_LEVEL;
+
+ /*
+ * Even though this looks irrelevant since we are supposed to
+ * be controlling the chip select manually, this bit also
+ * controls the levels of the chip select for inactive
+ * devices.
+ *
+ * If we don't set it, the chip select level will go low by
+ * default when the device is idle, which is not really
+ * expected in the common case where the chip select is active
+ * low.
+ */
+ if (spi->mode & SPI_CS_HIGH)
+ reg &= ~SUN4I_CTL_CS_ACTIVE_LOW;
+ else
+ reg |= SUN4I_CTL_CS_ACTIVE_LOW;
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
+}
+
+static int sun4i_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ unsigned int mclk_rate, div, timeout;
+ unsigned int tx_len = 0;
+ int ret = 0;
+ u32 reg;
+
+ /* We don't support transfer larger than the FIFO */
+ if (tfr->len > SUN4I_FIFO_DEPTH)
+ return -EINVAL;
+
+ reinit_completion(&sspi->done);
+ sspi->tx_buf = tfr->tx_buf;
+ sspi->rx_buf = tfr->rx_buf;
+ sspi->len = tfr->len;
+
+ /* Clear pending interrupts */
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, ~0);
+
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+
+ /* Reset FIFOs */
+ sun4i_spi_write(sspi, SUN4I_CTL_REG,
+ reg | SUN4I_CTL_RF_RST | SUN4I_CTL_TF_RST);
+
+ /*
+ * Setup the transfer control register: Chip Select,
+ * polarities, etc.
+ */
+ if (spi->mode & SPI_CPOL)
+ reg |= SUN4I_CTL_CPOL;
+ else
+ reg &= ~SUN4I_CTL_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= SUN4I_CTL_CPHA;
+ else
+ reg &= ~SUN4I_CTL_CPHA;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= SUN4I_CTL_LMTF;
+ else
+ reg &= ~SUN4I_CTL_LMTF;
+
+ /*
+ * If it's a TX only transfer, we don't want to fill the RX
+ * FIFO with bogus data
+ */
+ if (sspi->rx_buf)
+ reg &= ~SUN4I_CTL_DHB;
+ else
+ reg |= SUN4I_CTL_DHB;
+
+ /* We want to control the chip select manually */
+ reg |= SUN4I_CTL_CS_MANUAL;
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg);
+
+ /* Ensure that we have a parent clock fast enough */
+ mclk_rate = clk_get_rate(sspi->mclk);
+ if (mclk_rate < (2 * spi->max_speed_hz)) {
+ clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
+ mclk_rate = clk_get_rate(sspi->mclk);
+ }
+
+ /*
+ * Setup clock divider.
+ *
+	 * We have two choices here. Either we can use CDR1, which is
+	 * computed with this formula:
+	 * SPI_CLK = MOD_CLK / (2 ^ (cdr + 1))
+	 * Or we can use CDR2, which is computed with the formula:
+	 * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+	 * Whether we use the former or the latter is set through the
+	 * DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
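+	 *
+	 * Illustrative example (hypothetical rates): with mclk_rate = 24 MHz
+	 * and max_speed_hz = 3 MHz, div = 24 / 6 = 4, so CDR2 is programmed
+	 * with div - 1 = 3 and SPI_CLK = 24 MHz / (2 * (3 + 1)) = 3 MHz.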
+ */
+ div = mclk_rate / (2 * spi->max_speed_hz);
+ if (div <= (SUN4I_CLK_CTL_CDR2_MASK + 1)) {
+ if (div > 0)
+ div--;
+
+ reg = SUN4I_CLK_CTL_CDR2(div) | SUN4I_CLK_CTL_DRS;
+ } else {
+ div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
+ reg = SUN4I_CLK_CTL_CDR1(div);
+ }
+
+ sun4i_spi_write(sspi, SUN4I_CLK_CTL_REG, reg);
+
+ /* Setup the transfer now... */
+ if (sspi->tx_buf)
+ tx_len = tfr->len;
+
+ /* Setup the counters */
+ sun4i_spi_write(sspi, SUN4I_BURST_CNT_REG, SUN4I_BURST_CNT(tfr->len));
+ sun4i_spi_write(sspi, SUN4I_XMIT_CNT_REG, SUN4I_XMIT_CNT(tx_len));
+
+ /* Fill the TX FIFO */
+ sun4i_spi_fill_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+ /* Enable the interrupts */
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, SUN4I_INT_CTL_TC);
+
+ /* Start the transfer */
+ reg = sun4i_spi_read(sspi, SUN4I_CTL_REG);
+ sun4i_spi_write(sspi, SUN4I_CTL_REG, reg | SUN4I_CTL_XCH);
+
+ timeout = wait_for_completion_timeout(&sspi->done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ sun4i_spi_drain_fifo(sspi, SUN4I_FIFO_DEPTH);
+
+out:
+ sun4i_spi_write(sspi, SUN4I_INT_CTL_REG, 0);
+
+ return ret;
+}
+
+static irqreturn_t sun4i_spi_handler(int irq, void *dev_id)
+{
+ struct sun4i_spi *sspi = dev_id;
+ u32 status = sun4i_spi_read(sspi, SUN4I_INT_STA_REG);
+
+ /* Transfer complete */
+ if (status & SUN4I_INT_CTL_TC) {
+ sun4i_spi_write(sspi, SUN4I_INT_STA_REG, SUN4I_INT_CTL_TC);
+ complete(&sspi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sun4i_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sspi->hclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable AHB clock\n");
+ goto out;
+ }
+
+ ret = clk_prepare_enable(sspi->mclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable module clock\n");
+ goto err;
+ }
+
+ sun4i_spi_write(sspi, SUN4I_CTL_REG,
+ SUN4I_CTL_ENABLE | SUN4I_CTL_MASTER | SUN4I_CTL_TP);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(sspi->hclk);
+out:
+ return ret;
+}
+
+static int sun4i_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun4i_spi *sspi = spi_master_get_devdata(master);
+
+ clk_disable_unprepare(sspi->mclk);
+ clk_disable_unprepare(sspi->hclk);
+
+ return 0;
+}
+
+static int sun4i_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sun4i_spi *sspi;
+ struct resource *res;
+ int ret = 0, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sun4i_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sspi->base_addr)) {
+ ret = PTR_ERR(sspi->base_addr);
+ goto err_free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No spi IRQ specified\n");
+ ret = -ENXIO;
+ goto err_free_master;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sun4i_spi_handler,
+ 0, "sun4i-spi", sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_free_master;
+ }
+
+ sspi->master = master;
+ master->set_cs = sun4i_spi_set_cs;
+ master->transfer_one = sun4i_spi_transfer_one;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sspi->hclk)) {
+ dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+ ret = PTR_ERR(sspi->hclk);
+ goto err_free_master;
+ }
+
+ sspi->mclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(sspi->mclk)) {
+ dev_err(&pdev->dev, "Unable to acquire module clock\n");
+ ret = PTR_ERR(sspi->mclk);
+ goto err_free_master;
+ }
+
+ init_completion(&sspi->done);
+
+	/*
+	 * This wake-up/shutdown pattern ensures the device can be woken
+	 * up even when runtime PM is disabled.
+	 */
+ ret = sun4i_spi_runtime_resume(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't resume the device\n");
+ goto err_free_master;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ goto err_pm_disable;
+ }
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ sun4i_spi_runtime_suspend(&pdev->dev);
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int sun4i_spi_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun4i_spi_match[] = {
+ { .compatible = "allwinner,sun4i-a10-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun4i_spi_match);
+
+static const struct dev_pm_ops sun4i_spi_pm_ops = {
+ .runtime_resume = sun4i_spi_runtime_resume,
+ .runtime_suspend = sun4i_spi_runtime_suspend,
+};
+
+static struct platform_driver sun4i_spi_driver = {
+ .probe = sun4i_spi_probe,
+ .remove = sun4i_spi_remove,
+ .driver = {
+ .name = "sun4i-spi",
+ .of_match_table = sun4i_spi_match,
+ .pm = &sun4i_spi_pm_ops,
+ },
+};
+module_platform_driver(sun4i_spi_driver);
+
+MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A1X/A20 SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-sun6i.c b/drivers/spi/spi-sun6i.c
new file mode 100644
index 000000000..ac48f5970
--- /dev/null
+++ b/drivers/spi/spi-sun6i.c
@@ -0,0 +1,482 @@
+/*
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ *
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/device.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/reset.h>
+
+#include <linux/spi/spi.h>
+
+#define SUN6I_FIFO_DEPTH 128
+
+#define SUN6I_GBL_CTL_REG 0x04
+#define SUN6I_GBL_CTL_BUS_ENABLE BIT(0)
+#define SUN6I_GBL_CTL_MASTER BIT(1)
+#define SUN6I_GBL_CTL_TP BIT(7)
+#define SUN6I_GBL_CTL_RST BIT(31)
+
+#define SUN6I_TFR_CTL_REG 0x08
+#define SUN6I_TFR_CTL_CPHA BIT(0)
+#define SUN6I_TFR_CTL_CPOL BIT(1)
+#define SUN6I_TFR_CTL_SPOL BIT(2)
+#define SUN6I_TFR_CTL_CS_MASK 0x30
+#define SUN6I_TFR_CTL_CS(cs) (((cs) << 4) & SUN6I_TFR_CTL_CS_MASK)
+#define SUN6I_TFR_CTL_CS_MANUAL BIT(6)
+#define SUN6I_TFR_CTL_CS_LEVEL BIT(7)
+#define SUN6I_TFR_CTL_DHB BIT(8)
+#define SUN6I_TFR_CTL_FBS BIT(12)
+#define SUN6I_TFR_CTL_XCH BIT(31)
+
+#define SUN6I_INT_CTL_REG 0x10
+#define SUN6I_INT_CTL_RF_OVF BIT(8)
+#define SUN6I_INT_CTL_TC BIT(12)
+
+#define SUN6I_INT_STA_REG 0x14
+
+#define SUN6I_FIFO_CTL_REG 0x18
+#define SUN6I_FIFO_CTL_RF_RST BIT(15)
+#define SUN6I_FIFO_CTL_TF_RST BIT(31)
+
+#define SUN6I_FIFO_STA_REG 0x1c
+#define SUN6I_FIFO_STA_RF_CNT_MASK 0x7f
+#define SUN6I_FIFO_STA_RF_CNT_BITS 0
+#define SUN6I_FIFO_STA_TF_CNT_MASK 0x7f
+#define SUN6I_FIFO_STA_TF_CNT_BITS 16
+
+#define SUN6I_CLK_CTL_REG 0x24
+#define SUN6I_CLK_CTL_CDR2_MASK 0xff
+#define SUN6I_CLK_CTL_CDR2(div) (((div) & SUN6I_CLK_CTL_CDR2_MASK) << 0)
+#define SUN6I_CLK_CTL_CDR1_MASK 0xf
+#define SUN6I_CLK_CTL_CDR1(div) (((div) & SUN6I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN6I_CLK_CTL_DRS BIT(12)
+
+#define SUN6I_BURST_CNT_REG 0x30
+#define SUN6I_BURST_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_XMIT_CNT_REG 0x34
+#define SUN6I_XMIT_CNT(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_BURST_CTL_CNT_REG 0x38
+#define SUN6I_BURST_CTL_CNT_STC(cnt) ((cnt) & 0xffffff)
+
+#define SUN6I_TXDATA_REG 0x200
+#define SUN6I_RXDATA_REG 0x300
+
+struct sun6i_spi {
+ struct spi_master *master;
+ void __iomem *base_addr;
+ struct clk *hclk;
+ struct clk *mclk;
+ struct reset_control *rstc;
+
+ struct completion done;
+
+ const u8 *tx_buf;
+ u8 *rx_buf;
+ int len;
+};
+
+static inline u32 sun6i_spi_read(struct sun6i_spi *sspi, u32 reg)
+{
+ return readl(sspi->base_addr + reg);
+}
+
+static inline void sun6i_spi_write(struct sun6i_spi *sspi, u32 reg, u32 value)
+{
+ writel(value, sspi->base_addr + reg);
+}
+
+static inline void sun6i_spi_drain_fifo(struct sun6i_spi *sspi, int len)
+{
+ u32 reg, cnt;
+ u8 byte;
+
+ /* See how much data is available */
+ reg = sun6i_spi_read(sspi, SUN6I_FIFO_STA_REG);
+ reg &= SUN6I_FIFO_STA_RF_CNT_MASK;
+ cnt = reg >> SUN6I_FIFO_STA_RF_CNT_BITS;
+
+ if (len > cnt)
+ len = cnt;
+
+ while (len--) {
+ byte = readb(sspi->base_addr + SUN6I_RXDATA_REG);
+ if (sspi->rx_buf)
+ *sspi->rx_buf++ = byte;
+ }
+}
+
+static inline void sun6i_spi_fill_fifo(struct sun6i_spi *sspi, int len)
+{
+ u8 byte;
+
+ if (len > sspi->len)
+ len = sspi->len;
+
+ while (len--) {
+ byte = sspi->tx_buf ? *sspi->tx_buf++ : 0;
+ writeb(byte, sspi->base_addr + SUN6I_TXDATA_REG);
+ sspi->len--;
+ }
+}
+
+static void sun6i_spi_set_cs(struct spi_device *spi, bool enable)
+{
+ struct sun6i_spi *sspi = spi_master_get_devdata(spi->master);
+ u32 reg;
+
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ reg &= ~SUN6I_TFR_CTL_CS_MASK;
+ reg |= SUN6I_TFR_CTL_CS(spi->chip_select);
+
+ if (enable)
+ reg |= SUN6I_TFR_CTL_CS_LEVEL;
+ else
+ reg &= ~SUN6I_TFR_CTL_CS_LEVEL;
+
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
+}
+
+static int sun6i_spi_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *tfr)
+{
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ unsigned int mclk_rate, div, timeout;
+ unsigned int tx_len = 0;
+ int ret = 0;
+ u32 reg;
+
+ /* We don't support transfer larger than the FIFO */
+ if (tfr->len > SUN6I_FIFO_DEPTH)
+ return -EINVAL;
+
+ reinit_completion(&sspi->done);
+ sspi->tx_buf = tfr->tx_buf;
+ sspi->rx_buf = tfr->rx_buf;
+ sspi->len = tfr->len;
+
+ /* Clear pending interrupts */
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, ~0);
+
+ /* Reset FIFO */
+ sun6i_spi_write(sspi, SUN6I_FIFO_CTL_REG,
+ SUN6I_FIFO_CTL_RF_RST | SUN6I_FIFO_CTL_TF_RST);
+
+ /*
+ * Setup the transfer control register: Chip Select,
+ * polarities, etc.
+ */
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+
+ if (spi->mode & SPI_CPOL)
+ reg |= SUN6I_TFR_CTL_CPOL;
+ else
+ reg &= ~SUN6I_TFR_CTL_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ reg |= SUN6I_TFR_CTL_CPHA;
+ else
+ reg &= ~SUN6I_TFR_CTL_CPHA;
+
+ if (spi->mode & SPI_LSB_FIRST)
+ reg |= SUN6I_TFR_CTL_FBS;
+ else
+ reg &= ~SUN6I_TFR_CTL_FBS;
+
+ /*
+ * If it's a TX only transfer, we don't want to fill the RX
+ * FIFO with bogus data
+ */
+ if (sspi->rx_buf)
+ reg &= ~SUN6I_TFR_CTL_DHB;
+ else
+ reg |= SUN6I_TFR_CTL_DHB;
+
+ /* We want to control the chip select manually */
+ reg |= SUN6I_TFR_CTL_CS_MANUAL;
+
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg);
+
+ /* Ensure that we have a parent clock fast enough */
+ mclk_rate = clk_get_rate(sspi->mclk);
+ if (mclk_rate < (2 * spi->max_speed_hz)) {
+ clk_set_rate(sspi->mclk, 2 * spi->max_speed_hz);
+ mclk_rate = clk_get_rate(sspi->mclk);
+ }
+
+ /*
+ * Setup clock divider.
+ *
+	 * We have two choices here. Either we can use CDR1, which is
+	 * computed with this formula:
+	 * SPI_CLK = MOD_CLK / (2 ^ cdr)
+	 * Or we can use CDR2, which is computed with the formula:
+	 * SPI_CLK = MOD_CLK / (2 * (cdr + 1))
+	 * Whether we use the former or the latter is set through the
+	 * DRS bit.
+ *
+ * First try CDR2, and if we can't reach the expected
+ * frequency, fall back to CDR1.
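+	 *
+	 * Illustrative example (hypothetical rates): with mclk_rate = 100 MHz
+	 * and max_speed_hz = 100 kHz, CDR2 would need a divider of 500, which
+	 * does not fit in 8 bits, so CDR1 is used instead:
+	 * cdr = ilog2(100 MHz) - ilog2(100 kHz) = 26 - 16 = 10, giving
+	 * SPI_CLK = 100 MHz / 2^10 ~= 97.7 kHz.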
+ */
+ div = mclk_rate / (2 * spi->max_speed_hz);
+ if (div <= (SUN6I_CLK_CTL_CDR2_MASK + 1)) {
+ if (div > 0)
+ div--;
+
+ reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
+ } else {
+ div = ilog2(mclk_rate) - ilog2(spi->max_speed_hz);
+ reg = SUN6I_CLK_CTL_CDR1(div);
+ }
+
+ sun6i_spi_write(sspi, SUN6I_CLK_CTL_REG, reg);
+
+ /* Setup the transfer now... */
+ if (sspi->tx_buf)
+ tx_len = tfr->len;
+
+ /* Setup the counters */
+ sun6i_spi_write(sspi, SUN6I_BURST_CNT_REG, SUN6I_BURST_CNT(tfr->len));
+ sun6i_spi_write(sspi, SUN6I_XMIT_CNT_REG, SUN6I_XMIT_CNT(tx_len));
+ sun6i_spi_write(sspi, SUN6I_BURST_CTL_CNT_REG,
+ SUN6I_BURST_CTL_CNT_STC(tx_len));
+
+ /* Fill the TX FIFO */
+ sun6i_spi_fill_fifo(sspi, SUN6I_FIFO_DEPTH);
+
+ /* Enable the interrupts */
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, SUN6I_INT_CTL_TC);
+
+ /* Start the transfer */
+ reg = sun6i_spi_read(sspi, SUN6I_TFR_CTL_REG);
+ sun6i_spi_write(sspi, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+
+ timeout = wait_for_completion_timeout(&sspi->done,
+ msecs_to_jiffies(1000));
+ if (!timeout) {
+ ret = -ETIMEDOUT;
+ goto out;
+ }
+
+ sun6i_spi_drain_fifo(sspi, SUN6I_FIFO_DEPTH);
+
+out:
+ sun6i_spi_write(sspi, SUN6I_INT_CTL_REG, 0);
+
+ return ret;
+}
+
+static irqreturn_t sun6i_spi_handler(int irq, void *dev_id)
+{
+ struct sun6i_spi *sspi = dev_id;
+ u32 status = sun6i_spi_read(sspi, SUN6I_INT_STA_REG);
+
+ /* Transfer complete */
+ if (status & SUN6I_INT_CTL_TC) {
+ sun6i_spi_write(sspi, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
+ complete(&sspi->done);
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+static int sun6i_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(sspi->hclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable AHB clock\n");
+ goto out;
+ }
+
+ ret = clk_prepare_enable(sspi->mclk);
+ if (ret) {
+ dev_err(dev, "Couldn't enable module clock\n");
+ goto err;
+ }
+
+ ret = reset_control_deassert(sspi->rstc);
+ if (ret) {
+ dev_err(dev, "Couldn't deassert the device from reset\n");
+ goto err2;
+ }
+
+ sun6i_spi_write(sspi, SUN6I_GBL_CTL_REG,
+ SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER | SUN6I_GBL_CTL_TP);
+
+ return 0;
+
+err2:
+ clk_disable_unprepare(sspi->mclk);
+err:
+ clk_disable_unprepare(sspi->hclk);
+out:
+ return ret;
+}
+
+static int sun6i_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct sun6i_spi *sspi = spi_master_get_devdata(master);
+
+ reset_control_assert(sspi->rstc);
+ clk_disable_unprepare(sspi->mclk);
+ clk_disable_unprepare(sspi->hclk);
+
+ return 0;
+}
+
+static int sun6i_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct sun6i_spi *sspi;
+ struct resource *res;
+ int ret = 0, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct sun6i_spi));
+ if (!master) {
+ dev_err(&pdev->dev, "Unable to allocate SPI Master\n");
+ return -ENOMEM;
+ }
+
+ platform_set_drvdata(pdev, master);
+ sspi = spi_master_get_devdata(master);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ sspi->base_addr = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(sspi->base_addr)) {
+ ret = PTR_ERR(sspi->base_addr);
+ goto err_free_master;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "No spi IRQ specified\n");
+ ret = -ENXIO;
+ goto err_free_master;
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, sun6i_spi_handler,
+ 0, "sun6i-spi", sspi);
+ if (ret) {
+ dev_err(&pdev->dev, "Cannot request IRQ\n");
+ goto err_free_master;
+ }
+
+ sspi->master = master;
+ master->set_cs = sun6i_spi_set_cs;
+ master->transfer_one = sun6i_spi_transfer_one;
+ master->num_chipselect = 4;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->dev.of_node = pdev->dev.of_node;
+ master->auto_runtime_pm = true;
+
+ sspi->hclk = devm_clk_get(&pdev->dev, "ahb");
+ if (IS_ERR(sspi->hclk)) {
+ dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+ ret = PTR_ERR(sspi->hclk);
+ goto err_free_master;
+ }
+
+ sspi->mclk = devm_clk_get(&pdev->dev, "mod");
+ if (IS_ERR(sspi->mclk)) {
+ dev_err(&pdev->dev, "Unable to acquire module clock\n");
+ ret = PTR_ERR(sspi->mclk);
+ goto err_free_master;
+ }
+
+ init_completion(&sspi->done);
+
+ sspi->rstc = devm_reset_control_get(&pdev->dev, NULL);
+ if (IS_ERR(sspi->rstc)) {
+ dev_err(&pdev->dev, "Couldn't get reset controller\n");
+ ret = PTR_ERR(sspi->rstc);
+ goto err_free_master;
+ }
+
+	/*
+	 * This wake-up/shutdown pattern ensures the device can be woken
+	 * up even when runtime PM is disabled.
+	 */
+ ret = sun6i_spi_runtime_resume(&pdev->dev);
+ if (ret) {
+ dev_err(&pdev->dev, "Couldn't resume the device\n");
+ goto err_free_master;
+ }
+
+ pm_runtime_set_active(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+ pm_runtime_idle(&pdev->dev);
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret) {
+ dev_err(&pdev->dev, "cannot register SPI master\n");
+ goto err_pm_disable;
+ }
+
+ return 0;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ sun6i_spi_runtime_suspend(&pdev->dev);
+err_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int sun6i_spi_remove(struct platform_device *pdev)
+{
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct of_device_id sun6i_spi_match[] = {
+ { .compatible = "allwinner,sun6i-a31-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, sun6i_spi_match);
+
+static const struct dev_pm_ops sun6i_spi_pm_ops = {
+ .runtime_resume = sun6i_spi_runtime_resume,
+ .runtime_suspend = sun6i_spi_runtime_suspend,
+};
+
+static struct platform_driver sun6i_spi_driver = {
+ .probe = sun6i_spi_probe,
+ .remove = sun6i_spi_remove,
+ .driver = {
+ .name = "sun6i-spi",
+ .of_match_table = sun6i_spi_match,
+ .pm = &sun6i_spi_pm_ops,
+ },
+};
+module_platform_driver(sun6i_spi_driver);
+
+MODULE_AUTHOR("Pan Nan <pannan@allwinnertech.com>");
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner A31 SPI controller driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-tegra114.c b/drivers/spi/spi-tegra114.c
new file mode 100644
index 000000000..73779cecc
--- /dev/null
+++ b/drivers/spi/spi-tegra114.c
@@ -0,0 +1,1237 @@
+/*
+ * SPI driver for NVIDIA's Tegra114 SPI Controller.
+ *
+ * Copyright (c) 2013, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define SPI_COMMAND1 0x000
+#define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SPI_PACKED (1 << 5)
+#define SPI_TX_EN (1 << 11)
+#define SPI_RX_EN (1 << 12)
+#define SPI_BOTH_EN_BYTE (1 << 13)
+#define SPI_BOTH_EN_BIT (1 << 14)
+#define SPI_LSBYTE_FE (1 << 15)
+#define SPI_LSBIT_FE (1 << 16)
+#define SPI_BIDIROE (1 << 17)
+#define SPI_IDLE_SDA_DRIVE_LOW (0 << 18)
+#define SPI_IDLE_SDA_DRIVE_HIGH (1 << 18)
+#define SPI_IDLE_SDA_PULL_LOW (2 << 18)
+#define SPI_IDLE_SDA_PULL_HIGH (3 << 18)
+#define SPI_IDLE_SDA_MASK (3 << 18)
+#define SPI_CS_SS_VAL (1 << 20)
+#define SPI_CS_SW_HW (1 << 21)
+/* SPI_CS_POL_INACTIVE bits are default high; n ranges from 0 to 3 */
+#define SPI_CS_POL_INACTIVE(n) (1 << (22 + (n)))
+#define SPI_CS_POL_INACTIVE_MASK (0xF << 22)
+
+#define SPI_CS_SEL_0 (0 << 26)
+#define SPI_CS_SEL_1 (1 << 26)
+#define SPI_CS_SEL_2 (2 << 26)
+#define SPI_CS_SEL_3 (3 << 26)
+#define SPI_CS_SEL_MASK (3 << 26)
+#define SPI_CS_SEL(x) (((x) & 0x3) << 26)
+#define SPI_CONTROL_MODE_0 (0 << 28)
+#define SPI_CONTROL_MODE_1 (1 << 28)
+#define SPI_CONTROL_MODE_2 (2 << 28)
+#define SPI_CONTROL_MODE_3 (3 << 28)
+#define SPI_CONTROL_MODE_MASK (3 << 28)
+#define SPI_MODE_SEL(x) (((x) & 0x3) << 28)
+#define SPI_M_S (1 << 30)
+#define SPI_PIO (1 << 31)
+
+#define SPI_COMMAND2 0x004
+#define SPI_TX_TAP_DELAY(x) (((x) & 0x3F) << 6)
+#define SPI_RX_TAP_DELAY(x) (((x) & 0x3F) << 0)
+
+#define SPI_CS_TIMING1 0x008
+#define SPI_SETUP_HOLD(setup, hold) (((setup) << 4) | (hold))
+#define SPI_CS_SETUP_HOLD(reg, cs, val) \
+ ((((val) & 0xFFu) << ((cs) * 8)) | \
+ ((reg) & ~(0xFFu << ((cs) * 8))))
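+/*
+ * Illustrative example: SPI_CS_SETUP_HOLD(reg, 1, SPI_SETUP_HOLD(3, 2))
+ * replaces bits 15:8 of reg with 0x32, i.e. 3 setup and 2 hold cycles
+ * for chip-select 1.
+ */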
+
+#define SPI_CS_TIMING2 0x00C
+#define CYCLES_BETWEEN_PACKETS_0(x) (((x) & 0x1F) << 0)
+#define CS_ACTIVE_BETWEEN_PACKETS_0 (1 << 5)
+#define CYCLES_BETWEEN_PACKETS_1(x) (((x) & 0x1F) << 8)
+#define CS_ACTIVE_BETWEEN_PACKETS_1 (1 << 13)
+#define CYCLES_BETWEEN_PACKETS_2(x) (((x) & 0x1F) << 16)
+#define CS_ACTIVE_BETWEEN_PACKETS_2 (1 << 21)
+#define CYCLES_BETWEEN_PACKETS_3(x) (((x) & 0x1F) << 24)
+#define CS_ACTIVE_BETWEEN_PACKETS_3 (1 << 29)
+#define SPI_SET_CS_ACTIVE_BETWEEN_PACKETS(reg, cs, val) \
+ (reg = (((val) & 0x1) << ((cs) * 8 + 5)) | \
+ ((reg) & ~(1 << ((cs) * 8 + 5))))
+#define SPI_SET_CYCLES_BETWEEN_PACKETS(reg, cs, val) \
+ (reg = (((val) & 0xF) << ((cs) * 8)) | \
+ ((reg) & ~(0xF << ((cs) * 8))))
+
+#define SPI_TRANS_STATUS 0x010
+#define SPI_BLK_CNT(val) (((val) >> 0) & 0xFFFF)
+#define SPI_SLV_IDLE_COUNT(val) (((val) >> 16) & 0xFF)
+#define SPI_RDY (1 << 30)
+
+#define SPI_FIFO_STATUS 0x014
+#define SPI_RX_FIFO_EMPTY (1 << 0)
+#define SPI_RX_FIFO_FULL (1 << 1)
+#define SPI_TX_FIFO_EMPTY (1 << 2)
+#define SPI_TX_FIFO_FULL (1 << 3)
+#define SPI_RX_FIFO_UNF (1 << 4)
+#define SPI_RX_FIFO_OVF (1 << 5)
+#define SPI_TX_FIFO_UNF (1 << 6)
+#define SPI_TX_FIFO_OVF (1 << 7)
+#define SPI_ERR (1 << 8)
+#define SPI_TX_FIFO_FLUSH (1 << 14)
+#define SPI_RX_FIFO_FLUSH (1 << 15)
+#define SPI_TX_FIFO_EMPTY_COUNT(val) (((val) >> 16) & 0x7F)
+#define SPI_RX_FIFO_FULL_COUNT(val) (((val) >> 23) & 0x7F)
+#define SPI_FRAME_END (1 << 30)
+#define SPI_CS_INACTIVE (1 << 31)
+
+#define SPI_FIFO_ERROR (SPI_RX_FIFO_UNF | \
+ SPI_RX_FIFO_OVF | SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF)
+#define SPI_FIFO_EMPTY (SPI_RX_FIFO_EMPTY | SPI_TX_FIFO_EMPTY)
+
+#define SPI_TX_DATA 0x018
+#define SPI_RX_DATA 0x01C
+
+#define SPI_DMA_CTL 0x020
+#define SPI_TX_TRIG_1 (0 << 15)
+#define SPI_TX_TRIG_4 (1 << 15)
+#define SPI_TX_TRIG_8 (2 << 15)
+#define SPI_TX_TRIG_16 (3 << 15)
+#define SPI_TX_TRIG_MASK (3 << 15)
+#define SPI_RX_TRIG_1 (0 << 19)
+#define SPI_RX_TRIG_4 (1 << 19)
+#define SPI_RX_TRIG_8 (2 << 19)
+#define SPI_RX_TRIG_16 (3 << 19)
+#define SPI_RX_TRIG_MASK (3 << 19)
+#define SPI_IE_TX (1 << 28)
+#define SPI_IE_RX (1 << 29)
+#define SPI_CONT (1 << 30)
+#define SPI_DMA (1 << 31)
+#define SPI_DMA_EN SPI_DMA
+
+#define SPI_DMA_BLK 0x024
+#define SPI_DMA_BLK_SET(x) (((x) & 0xFFFF) << 0)
+
+#define SPI_TX_FIFO 0x108
+#define SPI_RX_FIFO 0x188
+#define MAX_CHIP_SELECT 4
+#define SPI_FIFO_DEPTH 64
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
+#define TX_FIFO_EMPTY_COUNT_MAX SPI_TX_FIFO_EMPTY_COUNT(0x40)
+#define RX_FIFO_FULL_COUNT_ZERO SPI_RX_FIFO_FULL_COUNT(0)
+#define MAX_HOLD_CYCLES 16
+#define SPI_DEFAULT_SPEED 25000000
+
+struct tegra_spi_data {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *base;
+ phys_addr_t phys;
+ unsigned irq;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ struct spi_device *cs_control;
+ unsigned cur_pos;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+ unsigned cur_direction;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+
+ u32 command1_reg;
+ u32 dma_control_reg;
+ u32 def_command1_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+ struct dma_chan *rx_dma_chan;
+ u32 *rx_dma_buf;
+ dma_addr_t rx_dma_phys;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+
+ struct dma_chan *tx_dma_chan;
+ u32 *tx_dma_buf;
+ dma_addr_t tx_dma_phys;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+};
+
+static int tegra_spi_runtime_suspend(struct device *dev);
+static int tegra_spi_runtime_resume(struct device *dev);
+
+static inline u32 tegra_spi_readl(struct tegra_spi_data *tspi,
+ unsigned long reg)
+{
+ return readl(tspi->base + reg);
+}
+
+static inline void tegra_spi_writel(struct tegra_spi_data *tspi,
+ u32 val, unsigned long reg)
+{
+ writel(val, tspi->base + reg);
+
+ /* Read back register to make sure that register writes completed */
+ if (reg != SPI_TX_FIFO)
+ readl(tspi->base + SPI_COMMAND1);
+}
+
+static void tegra_spi_clear_status(struct tegra_spi_data *tspi)
+{
+ u32 val;
+
+ /* Write 1 to clear status register */
+ val = tegra_spi_readl(tspi, SPI_TRANS_STATUS);
+ tegra_spi_writel(tspi, val, SPI_TRANS_STATUS);
+
+ /* Clear fifo status error if any */
+ val = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if (val & SPI_ERR)
+ tegra_spi_writel(tspi, SPI_ERR | SPI_FIFO_ERROR,
+ SPI_FIFO_STATUS);
+}
+
+static unsigned tegra_spi_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_spi_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+ unsigned bits_per_word = t->bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
+
+ if (bits_per_word == 8 || bits_per_word == 16) {
+ tspi->is_packed = 1;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = 0;
+ tspi->words_per_32bit = 1;
+ }
+
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = (max_len + 3) / 4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = max_word;
+ }
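+	/*
+	 * Illustrative example: an 8 bits-per-word transfer with 100 bytes
+	 * remaining takes the packed path: bytes_per_word = 1,
+	 * words_per_32bit = 4, curr_dma_words = 100 and total_fifo_words =
+	 * (100 + 3) / 4 = 25.
+	 */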
+ return total_fifo_words;
+}
+
+static unsigned tegra_spi_fill_tx_fifo_from_client_txbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ u32 fifo_status;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned int written_words;
+ unsigned fifo_words_left;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+
+ fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ tx_empty_count = SPI_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tspi->is_packed) {
+ fifo_words_left = tx_empty_count * tspi->words_per_32bit;
+ written_words = min(fifo_words_left, tspi->curr_dma_words);
+ nbytes = written_words * tspi->bytes_per_word;
+ max_n_32bit = DIV_ROUND_UP(nbytes, 4);
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_spi_writel(tspi, x, SPI_TX_FIFO);
+ }
+ } else {
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ nbytes = written_words * tspi->bytes_per_word;
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_spi_writel(tspi, x, SPI_TX_FIFO);
+ }
+ }
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ return written_words;
+}
+
+static unsigned int tegra_spi_read_rx_fifo_to_client_rxbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ unsigned rx_full_count;
+ u32 fifo_status;
+ unsigned i, count;
+ unsigned int read_words = 0;
+ unsigned len;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+
+ fifo_status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ rx_full_count = SPI_RX_FIFO_FULL_COUNT(fifo_status);
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO);
+
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> i*8) & 0xFF;
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
+ } else {
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_spi_readl(tspi, SPI_RX_FIFO) & rx_mask;
+
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
+ }
+ return read_words;
+}
+
+static void tegra_spi_copy_client_txbuf_to_spi_txbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+	/* Make the DMA buffer readable by the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+
+ if (tspi->is_packed) {
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+
+ memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = 0;
+
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tspi->tx_dma_buf[count] = x;
+ }
+ }
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+	/* Make the DMA buffer visible to the DMA engine again */
+ dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+}
+
+static void tegra_spi_copy_spi_rxbuf_to_client_rxbuf(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+	/* Make the DMA buffer readable by the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ if (tspi->is_packed) {
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = tspi->rx_dma_buf[count] & rx_mask;
+
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+	/* Make the DMA buffer visible to the DMA engine again */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+}
+
+static void tegra_spi_dma_complete(void *args)
+{
+ struct completion *dma_complete = args;
+
+ complete(dma_complete);
+}
+
+static int tegra_spi_start_tx_dma(struct tegra_spi_data *tspi, int len)
+{
+ reinit_completion(&tspi->tx_dma_complete);
+ tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
+ tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->tx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Tx\n");
+ return -EIO;
+ }
+
+ tspi->tx_dma_desc->callback = tegra_spi_dma_complete;
+ tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
+
+ dmaengine_submit(tspi->tx_dma_desc);
+ dma_async_issue_pending(tspi->tx_dma_chan);
+ return 0;
+}
+
+static int tegra_spi_start_rx_dma(struct tegra_spi_data *tspi, int len)
+{
+ reinit_completion(&tspi->rx_dma_complete);
+ tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
+ tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->rx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Rx\n");
+ return -EIO;
+ }
+
+ tspi->rx_dma_desc->callback = tegra_spi_dma_complete;
+ tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
+
+ dmaengine_submit(tspi->rx_dma_desc);
+ dma_async_issue_pending(tspi->rx_dma_chan);
+ return 0;
+}
+
+static int tegra_spi_start_dma_based_transfer(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned int len;
+ int ret = 0;
+ u32 status;
+
+ /* Make sure that the Rx and Tx FIFOs are empty */
+ status = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if ((status & SPI_FIFO_EMPTY) != SPI_FIFO_EMPTY) {
+ dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
+ (unsigned)status);
+ return -EIO;
+ }
+
+ val = SPI_DMA_BLK_SET(tspi->curr_dma_words - 1);
+ tegra_spi_writel(tspi, val, SPI_DMA_BLK);
+
+ if (tspi->is_packed)
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
+
+ /* Set attention level based on length of transfer */
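+ /*
+ * The trigger must divide the transfer evenly: lengths that are
+ * not a multiple of 16 bytes use the one-word trigger, a 16-byte
+ * remainder allows four-word bursts, and 32-byte-aligned lengths
+ * can use the full eight-word burst.
+ */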
+ if (len & 0xF)
+ val |= SPI_TX_TRIG_1 | SPI_RX_TRIG_1;
+ else if (((len) >> 4) & 0x1)
+ val |= SPI_TX_TRIG_4 | SPI_RX_TRIG_4;
+ else
+ val |= SPI_TX_TRIG_8 | SPI_RX_TRIG_8;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TX;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RX;
+
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ tegra_spi_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ ret = tegra_spi_start_tx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting tx dma failed, err %d\n", ret);
+ return ret;
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ /* Hand the DMA buffer back to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ ret = tegra_spi_start_rx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting rx dma failed, err %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ tspi->dma_control_reg = val;
+
+ val |= SPI_DMA_EN;
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ return ret;
+}
+
+static int tegra_spi_start_cpu_based_transfer(
+ struct tegra_spi_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned cur_words;
+
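+ /*
+ * PIO transfers are still started through the DMA control
+ * registers: SPI_DMA_BLK/SPI_DMA_EN only start the transfer state
+ * machine, while the FIFOs are serviced by the interrupt handler
+ * instead of a DMA channel.
+ */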
+ if (tspi->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_spi_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ cur_words = tspi->curr_dma_words;
+
+ val = SPI_DMA_BLK_SET(cur_words - 1);
+ tegra_spi_writel(tspi, val, SPI_DMA_BLK);
+
+ val = 0;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TX;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RX;
+
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+
+ val |= SPI_DMA_EN;
+ tegra_spi_writel(tspi, val, SPI_DMA_CTL);
+ return 0;
+}
+
+static int tegra_spi_init_dma_param(struct tegra_spi_data *tspi,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ int ret;
+ struct dma_slave_config dma_sconfig;
+
+ dma_chan = dma_request_slave_channel_reason(tspi->dev,
+ dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(tspi->dev,
+ "Dma channel is not available: %d\n", ret);
+ return ret;
+ }
+
+ dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
+ &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ dev_err(tspi->dev, " Not able to allocate the dma buffer\n");
+ dma_release_channel(dma_chan);
+ return -ENOMEM;
+ }
+
+ if (dma_to_memory) {
+ dma_sconfig.src_addr = tspi->phys + SPI_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = 0;
+ } else {
+ dma_sconfig.dst_addr = tspi->phys + SPI_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = 0;
+ }
+
+ ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
+ if (ret)
+ goto scrub;
+ if (dma_to_memory) {
+ tspi->rx_dma_chan = dma_chan;
+ tspi->rx_dma_buf = dma_buf;
+ tspi->rx_dma_phys = dma_phys;
+ } else {
+ tspi->tx_dma_chan = dma_chan;
+ tspi->tx_dma_buf = dma_buf;
+ tspi->tx_dma_phys = dma_phys;
+ }
+ return 0;
+
+scrub:
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+ return ret;
+}
+
+static void tegra_spi_deinit_dma_param(struct tegra_spi_data *tspi,
+ bool dma_to_memory)
+{
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ struct dma_chan *dma_chan;
+
+ if (dma_to_memory) {
+ dma_buf = tspi->rx_dma_buf;
+ dma_chan = tspi->rx_dma_chan;
+ dma_phys = tspi->rx_dma_phys;
+ tspi->rx_dma_chan = NULL;
+ tspi->rx_dma_buf = NULL;
+ } else {
+ dma_buf = tspi->tx_dma_buf;
+ dma_chan = tspi->tx_dma_chan;
+ dma_phys = tspi->tx_dma_phys;
+ tspi->tx_dma_buf = NULL;
+ tspi->tx_dma_chan = NULL;
+ }
+ if (!dma_chan)
+ return;
+
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+}
+
+static u32 tegra_spi_setup_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed = t->speed_hz;
+ u8 bits_per_word = t->bits_per_word;
+ u32 command1;
+ int req_mode;
+
+ if (speed != tspi->cur_speed) {
+ clk_set_rate(tspi->clk, speed);
+ tspi->cur_speed = speed;
+ }
+
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->curr_xfer = t;
+
+ if (is_first_of_msg) {
+ tegra_spi_clear_status(tspi);
+
+ command1 = tspi->def_command1_reg;
+ command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
+
+ command1 &= ~SPI_CONTROL_MODE_MASK;
+ req_mode = spi->mode & 0x3;
+ if (req_mode == SPI_MODE_0)
+ command1 |= SPI_CONTROL_MODE_0;
+ else if (req_mode == SPI_MODE_1)
+ command1 |= SPI_CONTROL_MODE_1;
+ else if (req_mode == SPI_MODE_2)
+ command1 |= SPI_CONTROL_MODE_2;
+ else if (req_mode == SPI_MODE_3)
+ command1 |= SPI_CONTROL_MODE_3;
+
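+ /*
+ * cs_control is left pointing at a device when its last transfer
+ * ended with cs_change, keeping the chip select asserted; only
+ * rewrite COMMAND1 (deasserting that CS) when this message is for
+ * a different device.
+ */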
+ if (tspi->cs_control) {
+ if (tspi->cs_control != spi)
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+ tspi->cs_control = NULL;
+ } else
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+
+ command1 |= SPI_CS_SW_HW;
+ if (spi->mode & SPI_CS_HIGH)
+ command1 |= SPI_CS_SS_VAL;
+ else
+ command1 &= ~SPI_CS_SS_VAL;
+
+ tegra_spi_writel(tspi, 0, SPI_COMMAND2);
+ } else {
+ command1 = tspi->command1_reg;
+ command1 &= ~SPI_BIT_LENGTH(~0);
+ command1 |= SPI_BIT_LENGTH(bits_per_word - 1);
+ }
+
+ return command1;
+}
+
+static int tegra_spi_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, u32 command1)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ unsigned total_fifo_words;
+ int ret;
+
+ total_fifo_words = tegra_spi_calculate_curr_xfer_param(spi, tspi, t);
+
+ if (tspi->is_packed)
+ command1 |= SPI_PACKED;
+
+ command1 &= ~(SPI_CS_SEL_MASK | SPI_TX_EN | SPI_RX_EN);
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ command1 |= SPI_RX_EN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command1 |= SPI_TX_EN;
+ tspi->cur_direction |= DATA_DIR_TX;
+ }
+ command1 |= SPI_CS_SEL(spi->chip_select);
+ tegra_spi_writel(tspi, command1, SPI_COMMAND1);
+ tspi->command1_reg = command1;
+
+ dev_dbg(tspi->dev, "The def 0x%x and written 0x%x\n",
+ tspi->def_command1_reg, (unsigned)command1);
+
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ ret = tegra_spi_start_dma_based_transfer(tspi, t);
+ else
+ ret = tegra_spi_start_cpu_based_transfer(tspi, t);
+ return ret;
+}
+
+static int tegra_spi_setup(struct spi_device *spi)
+{
+ struct tegra_spi_data *tspi = spi_master_get_devdata(spi->master);
+ u32 val;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+ ret = pm_runtime_get_sync(tspi->dev);
+ if (ret < 0) {
+ dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ val = tspi->def_command1_reg;
+ if (spi->mode & SPI_CS_HIGH)
+ val &= ~SPI_CS_POL_INACTIVE(spi->chip_select);
+ else
+ val |= SPI_CS_POL_INACTIVE(spi->chip_select);
+ tspi->def_command1_reg = val;
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
+static void tegra_spi_transfer_delay(int delay)
+{
+ if (!delay)
+ return;
+
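+ /*
+ * udelay() is only meant for short busy-waits, so whole
+ * milliseconds are spent in mdelay() and only the sub-millisecond
+ * remainder is busy-waited in udelay().
+ */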
+ if (delay >= 1000)
+ mdelay(delay / 1000);
+
+ udelay(delay % 1000);
+}
+
+static int tegra_spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret;
+ bool skip = false;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ u32 cmd1;
+
+ reinit_completion(&tspi->xfer_completion);
+
+ cmd1 = tegra_spi_setup_transfer_one(spi, xfer, is_first_msg);
+
+ if (!xfer->len) {
+ ret = 0;
+ skip = true;
+ goto complete_xfer;
+ }
+
+ ret = tegra_spi_start_transfer_one(spi, xfer, cmd1);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "spi can not start transfer, err %d\n", ret);
+ goto complete_xfer;
+ }
+
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tspi->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ ret = -EIO;
+ goto complete_xfer;
+ }
+
+ if (tspi->tx_status || tspi->rx_status) {
+ dev_err(tspi->dev, "Error in Transfer\n");
+ ret = -EIO;
+ goto complete_xfer;
+ }
+ msg->actual_length += xfer->len;
+
+complete_xfer:
+ if (ret < 0 || skip) {
+ tegra_spi_writel(tspi, tspi->def_command1_reg,
+ SPI_COMMAND1);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
+ goto exit;
+ } else if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ if (xfer->cs_change)
+ tspi->cs_control = spi;
+ else {
+ tegra_spi_writel(tspi, tspi->def_command1_reg,
+ SPI_COMMAND1);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
+ }
+ } else if (xfer->cs_change) {
+ tegra_spi_writel(tspi, tspi->def_command1_reg,
+ SPI_COMMAND1);
+ tegra_spi_transfer_delay(xfer->delay_usecs);
+ }
+
+ }
+ ret = 0;
+exit:
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_spi_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status) {
+ dev_err(tspi->dev, "CpuXfer ERROR bit set 0x%x\n",
+ tspi->status_reg);
+ dev_err(tspi->dev, "CpuXfer 0x%08x:0x%08x\n",
+ tspi->command1_reg, tspi->dma_control_reg);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_spi_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_spi_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
+ tegra_spi_start_cpu_based_transfer(tspi, t);
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_dma_based_xfer(struct tegra_spi_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ long wait_status;
+ int err = 0;
+ unsigned total_fifo_words;
+ unsigned long flags;
+
+ /* Abort dmas if any error */
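+ /* err encodes the failing direction: bit 0 for TX DMA, bit 1 for RX DMA */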
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ dev_err(tspi->dev, "TxDma Xfer failed\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SPI_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ dev_err(tspi->dev, "RxDma Xfer failed\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(tspi->dev, "DmaXfer: ERROR bit set 0x%x\n",
+ tspi->status_reg);
+ dev_err(tspi->dev, "DmaXfer 0x%08x:0x%08x\n",
+ tspi->command1_reg, tspi->dma_control_reg);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_spi_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ /* Continue transfer in current message */
+ total_fifo_words = tegra_spi_calculate_curr_xfer_param(tspi->cur_spi,
+ tspi, t);
+ if (total_fifo_words > SPI_FIFO_DEPTH)
+ err = tegra_spi_start_dma_based_transfer(tspi, t);
+ else
+ err = tegra_spi_start_cpu_based_transfer(tspi, t);
+
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_spi_isr_thread(int irq, void *context_data)
+{
+ struct tegra_spi_data *tspi = context_data;
+
+ if (!tspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tspi);
+ return handle_dma_based_xfer(tspi);
+}
+
+static irqreturn_t tegra_spi_isr(int irq, void *context_data)
+{
+ struct tegra_spi_data *tspi = context_data;
+
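+ /*
+ * Runs in hard interrupt context: just latch the FIFO status and
+ * error bits and defer the FIFO/DMA handling to the threaded
+ * handler via IRQ_WAKE_THREAD.
+ */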
+ tspi->status_reg = tegra_spi_readl(tspi, SPI_FIFO_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SPI_TX_FIFO_UNF | SPI_TX_FIFO_OVF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SPI_RX_FIFO_OVF | SPI_RX_FIFO_UNF);
+ tegra_spi_clear_status(tspi);
+
+ return IRQ_WAKE_THREAD;
+}
+
+static const struct of_device_id tegra_spi_of_match[] = {
+ { .compatible = "nvidia,tegra114-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_spi_of_match);
+
+static int tegra_spi_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_spi_data *tspi;
+ struct resource *r;
+ int ret, spi_irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+ platform_set_drvdata(pdev, master);
+ tspi = spi_master_get_devdata(master);
+
+ if (of_property_read_u32(pdev->dev.of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tegra_spi_setup;
+ master->transfer_one_message = tegra_spi_transfer_one_message;
+ master->num_chipselect = MAX_CHIP_SELECT;
+ master->auto_runtime_pm = true;
+
+ tspi->master = master;
+ tspi->dev = &pdev->dev;
+ spin_lock_init(&tspi->lock);
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tspi->base)) {
+ ret = PTR_ERR(tspi->base);
+ goto exit_free_master;
+ }
+ tspi->phys = r->start;
+
+ spi_irq = platform_get_irq(pdev, 0);
+ tspi->irq = spi_irq;
+ ret = request_threaded_irq(tspi->irq, tegra_spi_isr,
+ tegra_spi_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_free_master;
+ }
+
+ tspi->clk = devm_clk_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tspi->clk);
+ goto exit_free_irq;
+ }
+
+ tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tspi->rst);
+ goto exit_free_irq;
+ }
+
+ tspi->max_buf_size = SPI_FIFO_DEPTH << 2;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+
+ ret = tegra_spi_init_dma_param(tspi, true);
+ if (ret < 0)
+ goto exit_free_irq;
+ ret = tegra_spi_init_dma_param(tspi, false);
+ if (ret < 0)
+ goto exit_rx_dma_free;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+
+ init_completion(&tspi->xfer_completion);
+
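+ /*
+ * If runtime PM is not available (e.g. CONFIG_PM is off), resume
+ * the controller by hand so the register accesses below have a
+ * running clock.
+ */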
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_spi_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+ tspi->def_command1_reg = SPI_M_S;
+ tegra_spi_writel(tspi, tspi->def_command1_reg, SPI_COMMAND1);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_spi_runtime_suspend(&pdev->dev);
+ tegra_spi_deinit_dma_param(tspi, false);
+exit_rx_dma_free:
+ tegra_spi_deinit_dma_param(tspi, true);
+exit_free_irq:
+ free_irq(spi_irq, tspi);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+
+ free_irq(tspi->irq, tspi);
+
+ if (tspi->tx_dma_chan)
+ tegra_spi_deinit_dma_param(tspi, false);
+
+ if (tspi->rx_dma_chan)
+ tegra_spi_deinit_dma_param(tspi, true);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_spi_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_spi_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_spi_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_spi_writel(tspi, tspi->command1_reg, SPI_COMMAND1);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_spi_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+
+ /* Flush all writes which are in the PPSB queue by reading back */
+ tegra_spi_readl(tspi, SPI_COMMAND1);
+
+ clk_disable_unprepare(tspi->clk);
+ return 0;
+}
+
+static int tegra_spi_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_spi_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops tegra_spi_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_spi_runtime_suspend,
+ tegra_spi_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_spi_suspend, tegra_spi_resume)
+};
+static struct platform_driver tegra_spi_driver = {
+ .driver = {
+ .name = "spi-tegra114",
+ .pm = &tegra_spi_pm_ops,
+ .of_match_table = tegra_spi_of_match,
+ },
+ .probe = tegra_spi_probe,
+ .remove = tegra_spi_remove,
+};
+module_platform_driver(tegra_spi_driver);
+
+MODULE_ALIAS("platform:spi-tegra114");
+MODULE_DESCRIPTION("NVIDIA Tegra114 SPI Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra20-sflash.c b/drivers/spi/spi-tegra20-sflash.c
new file mode 100644
index 000000000..b6558bb6f
--- /dev/null
+++ b/drivers/spi/spi-tegra20-sflash.c
@@ -0,0 +1,622 @@
+/*
+ * SPI driver for Nvidia's Tegra20 Serial Flash Controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * Author: Laxman Dewangan <ldewangan@nvidia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define SPI_COMMAND 0x000
+#define SPI_GO BIT(30)
+#define SPI_M_S BIT(28)
+#define SPI_ACTIVE_SCLK_MASK (0x3 << 26)
+#define SPI_ACTIVE_SCLK_DRIVE_LOW (0 << 26)
+#define SPI_ACTIVE_SCLK_DRIVE_HIGH (1 << 26)
+#define SPI_ACTIVE_SCLK_PULL_LOW (2 << 26)
+#define SPI_ACTIVE_SCLK_PULL_HIGH (3 << 26)
+
+#define SPI_CK_SDA_FALLING (1 << 21)
+#define SPI_CK_SDA_RISING (0 << 21)
+#define SPI_CK_SDA_MASK (1 << 21)
+#define SPI_ACTIVE_SDA (0x3 << 18)
+#define SPI_ACTIVE_SDA_DRIVE_LOW (0 << 18)
+#define SPI_ACTIVE_SDA_DRIVE_HIGH (1 << 18)
+#define SPI_ACTIVE_SDA_PULL_LOW (2 << 18)
+#define SPI_ACTIVE_SDA_PULL_HIGH (3 << 18)
+
+#define SPI_CS_POL_INVERT BIT(16)
+#define SPI_TX_EN BIT(15)
+#define SPI_RX_EN BIT(14)
+#define SPI_CS_VAL_HIGH BIT(13)
+#define SPI_CS_VAL_LOW 0x0
+#define SPI_CS_SW BIT(12)
+#define SPI_CS_HW 0x0
+#define SPI_CS_DELAY_MASK (7 << 9)
+#define SPI_CS3_EN BIT(8)
+#define SPI_CS2_EN BIT(7)
+#define SPI_CS1_EN BIT(6)
+#define SPI_CS0_EN BIT(5)
+
+#define SPI_CS_MASK (SPI_CS3_EN | SPI_CS2_EN | \
+ SPI_CS1_EN | SPI_CS0_EN)
+#define SPI_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+
+#define SPI_MODES (SPI_ACTIVE_SCLK_MASK | SPI_CK_SDA_MASK)
+
+#define SPI_STATUS 0x004
+#define SPI_BSY BIT(31)
+#define SPI_RDY BIT(30)
+#define SPI_TXF_FLUSH BIT(29)
+#define SPI_RXF_FLUSH BIT(28)
+#define SPI_RX_UNF BIT(27)
+#define SPI_TX_OVF BIT(26)
+#define SPI_RXF_EMPTY BIT(25)
+#define SPI_RXF_FULL BIT(24)
+#define SPI_TXF_EMPTY BIT(23)
+#define SPI_TXF_FULL BIT(22)
+#define SPI_BLK_CNT(count) (((count) & 0xffff) + 1)
+
+#define SPI_FIFO_ERROR (SPI_RX_UNF | SPI_TX_OVF)
+#define SPI_FIFO_EMPTY (SPI_TXF_EMPTY | SPI_RXF_EMPTY)
+
+#define SPI_RX_CMP 0x8
+#define SPI_DMA_CTL 0x0C
+#define SPI_DMA_EN BIT(31)
+#define SPI_IE_RXC BIT(27)
+#define SPI_IE_TXC BIT(26)
+#define SPI_PACKED BIT(20)
+#define SPI_RX_TRIG_MASK (0x3 << 18)
+#define SPI_RX_TRIG_1W (0x0 << 18)
+#define SPI_RX_TRIG_4W (0x1 << 18)
+#define SPI_TX_TRIG_MASK (0x3 << 16)
+#define SPI_TX_TRIG_1W (0x0 << 16)
+#define SPI_TX_TRIG_4W (0x1 << 16)
+#define SPI_DMA_BLK_COUNT(count) (((count) - 1) & 0xFFFF)
+
+#define SPI_TX_FIFO 0x10
+#define SPI_RX_FIFO 0x20
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define MAX_CHIP_SELECT 4
+#define SPI_FIFO_DEPTH 4
+#define SPI_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+struct tegra_sflash_data {
+ struct device *dev;
+ struct spi_master *master;
+ spinlock_t lock;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *base;
+ unsigned irq;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned bytes_per_word;
+ unsigned cur_direction;
+ unsigned curr_xfer_words;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+
+ u32 def_command_reg;
+ u32 command_reg;
+ u32 dma_control_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+};
+
+static int tegra_sflash_runtime_suspend(struct device *dev);
+static int tegra_sflash_runtime_resume(struct device *dev);
+
+static inline u32 tegra_sflash_readl(struct tegra_sflash_data *tsd,
+ unsigned long reg)
+{
+ return readl(tsd->base + reg);
+}
+
+static inline void tegra_sflash_writel(struct tegra_sflash_data *tsd,
+ u32 val, unsigned long reg)
+{
+ writel(val, tsd->base + reg);
+}
+
+static void tegra_sflash_clear_status(struct tegra_sflash_data *tsd)
+{
+ /* Write 1 to clear status register */
+ tegra_sflash_writel(tsd, SPI_RDY | SPI_FIFO_ERROR, SPI_STATUS);
+}
+
+static unsigned tegra_sflash_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_sflash_data *tsd,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tsd->cur_pos;
+ unsigned max_word;
+
+ tsd->bytes_per_word = DIV_ROUND_UP(t->bits_per_word, 8);
+ max_word = remain_len / tsd->bytes_per_word;
+ if (max_word > SPI_FIFO_DEPTH)
+ max_word = SPI_FIFO_DEPTH;
+ tsd->curr_xfer_words = max_word;
+ return max_word;
+}
+
+static unsigned tegra_sflash_fill_tx_fifo_from_client_txbuf(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ u32 status;
+ unsigned max_n_32bit = tsd->curr_xfer_words;
+ u8 *tx_buf = (u8 *)t->tx_buf + tsd->cur_tx_pos;
+
+ if (max_n_32bit > SPI_FIFO_DEPTH)
+ max_n_32bit = SPI_FIFO_DEPTH;
+ nbytes = max_n_32bit * tsd->bytes_per_word;
+
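+ /*
+ * The FIFO is only SPI_FIFO_DEPTH (4) words deep; push words until
+ * the hardware reports full or the current block is exhausted.
+ */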
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ while (!(status & SPI_TXF_FULL)) {
+ int i;
+ u32 x = 0;
+
+ for (i = 0; nbytes && (i < tsd->bytes_per_word);
+ i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_sflash_writel(tsd, x, SPI_TX_FIFO);
+ if (!nbytes)
+ break;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ }
+ tsd->cur_tx_pos += max_n_32bit * tsd->bytes_per_word;
+ return max_n_32bit;
+}
+
+static int tegra_sflash_read_rx_fifo_to_client_rxbuf(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ u32 status;
+ unsigned int read_words = 0;
+ u8 *rx_buf = (u8 *)t->rx_buf + tsd->cur_rx_pos;
+
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ while (!(status & SPI_RXF_EMPTY)) {
+ int i;
+ u32 x = tegra_sflash_readl(tsd, SPI_RX_FIFO);
+
+ for (i = 0; (i < tsd->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ read_words++;
+ status = tegra_sflash_readl(tsd, SPI_STATUS);
+ }
+ tsd->cur_rx_pos += read_words * tsd->bytes_per_word;
+ return 0;
+}
+
+static int tegra_sflash_start_cpu_based_transfer(
+ struct tegra_sflash_data *tsd, struct spi_transfer *t)
+{
+ u32 val = 0;
+ unsigned cur_words;
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ val |= SPI_IE_TXC;
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ val |= SPI_IE_RXC;
+
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ tsd->dma_control_reg = val;
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_sflash_fill_tx_fifo_from_client_txbuf(tsd, t);
+ else
+ cur_words = tsd->curr_xfer_words;
+ val |= SPI_DMA_BLK_COUNT(cur_words);
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ tsd->dma_control_reg = val;
+ val |= SPI_DMA_EN;
+ tegra_sflash_writel(tsd, val, SPI_DMA_CTL);
+ return 0;
+}
+
+static int tegra_sflash_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t, bool is_first_of_msg,
+ bool is_single_xfer)
+{
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u32 command;
+
+ speed = t->speed_hz;
+ if (speed != tsd->cur_speed) {
+ clk_set_rate(tsd->clk, speed);
+ tsd->cur_speed = speed;
+ }
+
+ tsd->cur_spi = spi;
+ tsd->cur_pos = 0;
+ tsd->cur_rx_pos = 0;
+ tsd->cur_tx_pos = 0;
+ tsd->curr_xfer = t;
+ tegra_sflash_calculate_curr_xfer_param(spi, tsd, t);
+ if (is_first_of_msg) {
+ command = tsd->def_command_reg;
+ command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
+ command |= SPI_CS_VAL_HIGH;
+
+ command &= ~SPI_MODES;
+ if (spi->mode & SPI_CPHA)
+ command |= SPI_CK_SDA_FALLING;
+
+ if (spi->mode & SPI_CPOL)
+ command |= SPI_ACTIVE_SCLK_DRIVE_HIGH;
+ else
+ command |= SPI_ACTIVE_SCLK_DRIVE_LOW;
+ command |= SPI_CS0_EN << spi->chip_select;
+ } else {
+ command = tsd->command_reg;
+ command &= ~SPI_BIT_LENGTH(~0);
+ command |= SPI_BIT_LENGTH(t->bits_per_word - 1);
+ command &= ~(SPI_RX_EN | SPI_TX_EN);
+ }
+
+ tsd->cur_direction = 0;
+ if (t->rx_buf) {
+ command |= SPI_RX_EN;
+ tsd->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command |= SPI_TX_EN;
+ tsd->cur_direction |= DATA_DIR_TX;
+ }
+ tegra_sflash_writel(tsd, command, SPI_COMMAND);
+ tsd->command_reg = command;
+
+ return tegra_sflash_start_cpu_based_transfer(tsd, t);
+}
+
+static int tegra_sflash_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ bool is_first_msg = true;
+ int single_xfer;
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ struct spi_transfer *xfer;
+ struct spi_device *spi = msg->spi;
+ int ret;
+
+ msg->status = 0;
+ msg->actual_length = 0;
+ single_xfer = list_is_singular(&msg->transfers);
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ reinit_completion(&tsd->xfer_completion);
+ ret = tegra_sflash_start_transfer_one(spi, xfer,
+ is_first_msg, single_xfer);
+ if (ret < 0) {
+ dev_err(tsd->dev,
+ "spi can not start transfer, err %d\n", ret);
+ goto exit;
+ }
+ is_first_msg = false;
+ ret = wait_for_completion_timeout(&tsd->xfer_completion,
+ SPI_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tsd->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ ret = -EIO;
+ goto exit;
+ }
+
+ if (tsd->tx_status || tsd->rx_status) {
+ dev_err(tsd->dev, "Error in Transfer\n");
+ ret = -EIO;
+ goto exit;
+ }
+ msg->actual_length += xfer->len;
+ if (xfer->cs_change && xfer->delay_usecs) {
+ tegra_sflash_writel(tsd, tsd->def_command_reg,
+ SPI_COMMAND);
+ udelay(xfer->delay_usecs);
+ }
+ }
+ ret = 0;
+exit:
+ tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
+ msg->status = ret;
+ spi_finalize_current_message(master);
+ return ret;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_sflash_data *tsd)
+{
+ struct spi_transfer *t = tsd->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tsd->lock, flags);
+ if (tsd->tx_status || tsd->rx_status || (tsd->status_reg & SPI_BSY)) {
+ dev_err(tsd->dev,
+ "CpuXfer ERROR bit set 0x%x\n", tsd->status_reg);
+ dev_err(tsd->dev,
+ "CpuXfer 0x%08x:0x%08x\n", tsd->command_reg,
+ tsd->dma_control_reg);
+ reset_control_assert(tsd->rst);
+ udelay(2);
+ reset_control_deassert(tsd->rst);
+ complete(&tsd->xfer_completion);
+ goto exit;
+ }
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ tegra_sflash_read_rx_fifo_to_client_rxbuf(tsd, t);
+
+ if (tsd->cur_direction & DATA_DIR_TX)
+ tsd->cur_pos = tsd->cur_tx_pos;
+ else
+ tsd->cur_pos = tsd->cur_rx_pos;
+
+ if (tsd->cur_pos == t->len) {
+ complete(&tsd->xfer_completion);
+ goto exit;
+ }
+
+ tegra_sflash_calculate_curr_xfer_param(tsd->cur_spi, tsd, t);
+ tegra_sflash_start_cpu_based_transfer(tsd, t);
+exit:
+ spin_unlock_irqrestore(&tsd->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_sflash_isr(int irq, void *context_data)
+{
+ struct tegra_sflash_data *tsd = context_data;
+
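+ /*
+ * This driver has no DMA support for the controller, so the whole
+ * transfer is serviced directly from interrupt context.
+ */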
+ tsd->status_reg = tegra_sflash_readl(tsd, SPI_STATUS);
+ if (tsd->cur_direction & DATA_DIR_TX)
+ tsd->tx_status = tsd->status_reg & SPI_TX_OVF;
+
+ if (tsd->cur_direction & DATA_DIR_RX)
+ tsd->rx_status = tsd->status_reg & SPI_RX_UNF;
+ tegra_sflash_clear_status(tsd);
+
+ return handle_cpu_based_xfer(tsd);
+}
+
+static const struct of_device_id tegra_sflash_of_match[] = {
+ { .compatible = "nvidia,tegra20-sflash", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_sflash_of_match);
+
+static int tegra_sflash_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_sflash_data *tsd;
+ struct resource *r;
+ int ret;
+ const struct of_device_id *match;
+
+ match = of_match_device(tegra_sflash_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tsd));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA;
+ master->transfer_one_message = tegra_sflash_transfer_one_message;
+ master->auto_runtime_pm = true;
+ master->num_chipselect = MAX_CHIP_SELECT;
+
+ platform_set_drvdata(pdev, master);
+ tsd = spi_master_get_devdata(master);
+ tsd->master = master;
+ tsd->dev = &pdev->dev;
+ spin_lock_init(&tsd->lock);
+
+ if (of_property_read_u32(tsd->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ tsd->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tsd->base)) {
+ ret = PTR_ERR(tsd->base);
+ goto exit_free_master;
+ }
+
+ tsd->irq = platform_get_irq(pdev, 0);
+ ret = request_irq(tsd->irq, tegra_sflash_isr, 0,
+ dev_name(&pdev->dev), tsd);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tsd->irq);
+ goto exit_free_master;
+ }
+
+ tsd->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tsd->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tsd->clk);
+ goto exit_free_irq;
+ }
+
+ tsd->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tsd->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tsd->rst);
+ goto exit_free_irq;
+ }
+
+ init_completion(&tsd->xfer_completion);
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_sflash_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+
+ /* Reset controller */
+ reset_control_assert(tsd->rst);
+ udelay(2);
+ reset_control_deassert(tsd->rst);
+
+ tsd->def_command_reg = SPI_M_S | SPI_CS_SW;
+ tegra_sflash_writel(tsd, tsd->def_command_reg, SPI_COMMAND);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_sflash_runtime_suspend(&pdev->dev);
+exit_free_irq:
+ free_irq(tsd->irq, tsd);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_sflash_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+
+ free_irq(tsd->irq, tsd);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_sflash_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_sflash_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_sflash_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_sflash_writel(tsd, tsd->command_reg, SPI_COMMAND);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_sflash_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+
+ /* Flush all writes which are in the PPSB queue by reading back */
+ tegra_sflash_readl(tsd, SPI_COMMAND);
+
+ clk_disable_unprepare(tsd->clk);
+ return 0;
+}
+
+static int tegra_sflash_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_sflash_data *tsd = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tsd->clk);
+ if (ret < 0) {
+ dev_err(tsd->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops slink_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_sflash_runtime_suspend,
+ tegra_sflash_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_sflash_suspend, tegra_sflash_resume)
+};
+static struct platform_driver tegra_sflash_driver = {
+ .driver = {
+ .name = "spi-tegra-sflash",
+ .pm = &slink_pm_ops,
+ .of_match_table = tegra_sflash_of_match,
+ },
+ .probe = tegra_sflash_probe,
+ .remove = tegra_sflash_remove,
+};
+module_platform_driver(tegra_sflash_driver);
+
+MODULE_ALIAS("platform:spi-tegra-sflash");
+MODULE_DESCRIPTION("NVIDIA Tegra20 Serial Flash Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
new file mode 100644
index 000000000..85c91f58b
--- /dev/null
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -0,0 +1,1238 @@
+/*
+ * SPI driver for Nvidia's Tegra20/Tegra30 SLINK Controller.
+ *
+ * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/clk.h>
+#include <linux/completion.h>
+#include <linux/delay.h>
+#include <linux/dmaengine.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/kthread.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include <linux/spi/spi.h>
+
+#define SLINK_COMMAND 0x000
+#define SLINK_BIT_LENGTH(x) (((x) & 0x1f) << 0)
+#define SLINK_WORD_SIZE(x) (((x) & 0x1f) << 5)
+#define SLINK_BOTH_EN (1 << 10)
+#define SLINK_CS_SW (1 << 11)
+#define SLINK_CS_VALUE (1 << 12)
+#define SLINK_CS_POLARITY (1 << 13)
+#define SLINK_IDLE_SDA_DRIVE_LOW (0 << 16)
+#define SLINK_IDLE_SDA_DRIVE_HIGH (1 << 16)
+#define SLINK_IDLE_SDA_PULL_LOW (2 << 16)
+#define SLINK_IDLE_SDA_PULL_HIGH (3 << 16)
+#define SLINK_IDLE_SDA_MASK (3 << 16)
+#define SLINK_CS_POLARITY1 (1 << 20)
+#define SLINK_CK_SDA (1 << 21)
+#define SLINK_CS_POLARITY2 (1 << 22)
+#define SLINK_CS_POLARITY3 (1 << 23)
+#define SLINK_IDLE_SCLK_DRIVE_LOW (0 << 24)
+#define SLINK_IDLE_SCLK_DRIVE_HIGH (1 << 24)
+#define SLINK_IDLE_SCLK_PULL_LOW (2 << 24)
+#define SLINK_IDLE_SCLK_PULL_HIGH (3 << 24)
+#define SLINK_IDLE_SCLK_MASK (3 << 24)
+#define SLINK_M_S (1 << 28)
+#define SLINK_WAIT (1 << 29)
+#define SLINK_GO (1 << 30)
+#define SLINK_ENB (1 << 31)
+
+#define SLINK_MODES (SLINK_IDLE_SCLK_MASK | SLINK_CK_SDA)
+
+#define SLINK_COMMAND2 0x004
+#define SLINK_LSBFE (1 << 0)
+#define SLINK_SSOE (1 << 1)
+#define SLINK_SPIE (1 << 4)
+#define SLINK_BIDIROE (1 << 6)
+#define SLINK_MODFEN (1 << 7)
+#define SLINK_INT_SIZE(x) (((x) & 0x1f) << 8)
+#define SLINK_CS_ACTIVE_BETWEEN (1 << 17)
+#define SLINK_SS_EN_CS(x) (((x) & 0x3) << 18)
+#define SLINK_SS_SETUP(x) (((x) & 0x3) << 20)
+#define SLINK_FIFO_REFILLS_0 (0 << 22)
+#define SLINK_FIFO_REFILLS_1 (1 << 22)
+#define SLINK_FIFO_REFILLS_2 (2 << 22)
+#define SLINK_FIFO_REFILLS_3 (3 << 22)
+#define SLINK_FIFO_REFILLS_MASK (3 << 22)
+#define SLINK_WAIT_PACK_INT(x) (((x) & 0x7) << 26)
+#define SLINK_SPC0 (1 << 29)
+#define SLINK_TXEN (1 << 30)
+#define SLINK_RXEN (1 << 31)
+
+#define SLINK_STATUS 0x008
+#define SLINK_COUNT(val) (((val) >> 0) & 0x1f)
+#define SLINK_WORD(val) (((val) >> 5) & 0x1f)
+#define SLINK_BLK_CNT(val) (((val) >> 0) & 0xffff)
+#define SLINK_MODF (1 << 16)
+#define SLINK_RX_UNF (1 << 18)
+#define SLINK_TX_OVF (1 << 19)
+#define SLINK_TX_FULL (1 << 20)
+#define SLINK_TX_EMPTY (1 << 21)
+#define SLINK_RX_FULL (1 << 22)
+#define SLINK_RX_EMPTY (1 << 23)
+#define SLINK_TX_UNF (1 << 24)
+#define SLINK_RX_OVF (1 << 25)
+#define SLINK_TX_FLUSH (1 << 26)
+#define SLINK_RX_FLUSH (1 << 27)
+#define SLINK_SCLK (1 << 28)
+#define SLINK_ERR (1 << 29)
+#define SLINK_RDY (1 << 30)
+#define SLINK_BSY (1 << 31)
+#define SLINK_FIFO_ERROR (SLINK_TX_OVF | SLINK_RX_UNF | \
+ SLINK_TX_UNF | SLINK_RX_OVF)
+
+#define SLINK_FIFO_EMPTY (SLINK_TX_EMPTY | SLINK_RX_EMPTY)
+
+#define SLINK_MAS_DATA 0x010
+#define SLINK_SLAVE_DATA 0x014
+
+#define SLINK_DMA_CTL 0x018
+#define SLINK_DMA_BLOCK_SIZE(x) (((x) & 0xffff) << 0)
+#define SLINK_TX_TRIG_1 (0 << 16)
+#define SLINK_TX_TRIG_4 (1 << 16)
+#define SLINK_TX_TRIG_8 (2 << 16)
+#define SLINK_TX_TRIG_16 (3 << 16)
+#define SLINK_TX_TRIG_MASK (3 << 16)
+#define SLINK_RX_TRIG_1 (0 << 18)
+#define SLINK_RX_TRIG_4 (1 << 18)
+#define SLINK_RX_TRIG_8 (2 << 18)
+#define SLINK_RX_TRIG_16 (3 << 18)
+#define SLINK_RX_TRIG_MASK (3 << 18)
+#define SLINK_PACKED (1 << 20)
+#define SLINK_PACK_SIZE_4 (0 << 21)
+#define SLINK_PACK_SIZE_8 (1 << 21)
+#define SLINK_PACK_SIZE_16 (2 << 21)
+#define SLINK_PACK_SIZE_32 (3 << 21)
+#define SLINK_PACK_SIZE_MASK (3 << 21)
+#define SLINK_IE_TXC (1 << 26)
+#define SLINK_IE_RXC (1 << 27)
+#define SLINK_DMA_EN (1 << 31)
+
+#define SLINK_STATUS2 0x01c
+#define SLINK_TX_FIFO_EMPTY_COUNT(val) (((val) & 0x3f) >> 0)
+#define SLINK_RX_FIFO_FULL_COUNT(val) (((val) & 0x3f0000) >> 16)
+#define SLINK_SS_HOLD_TIME(val) (((val) & 0xF) << 6)
+
+#define SLINK_TX_FIFO 0x100
+#define SLINK_RX_FIFO 0x180
+
+#define DATA_DIR_TX (1 << 0)
+#define DATA_DIR_RX (1 << 1)
+
+#define SLINK_DMA_TIMEOUT (msecs_to_jiffies(1000))
+
+#define DEFAULT_SPI_DMA_BUF_LEN (16*1024)
+#define TX_FIFO_EMPTY_COUNT_MAX SLINK_TX_FIFO_EMPTY_COUNT(0x20)
+#define RX_FIFO_FULL_COUNT_ZERO SLINK_RX_FIFO_FULL_COUNT(0)
+
+#define SLINK_STATUS2_RESET \
+ (TX_FIFO_EMPTY_COUNT_MAX | RX_FIFO_FULL_COUNT_ZERO << 16)
+
+#define MAX_CHIP_SELECT 4
+#define SLINK_FIFO_DEPTH 32
+
+struct tegra_slink_chip_data {
+ bool cs_hold_time;
+};
+
+struct tegra_slink_data {
+ struct device *dev;
+ struct spi_master *master;
+ const struct tegra_slink_chip_data *chip_data;
+ spinlock_t lock;
+
+ struct clk *clk;
+ struct reset_control *rst;
+ void __iomem *base;
+ phys_addr_t phys;
+ unsigned irq;
+ u32 cur_speed;
+
+ struct spi_device *cur_spi;
+ unsigned cur_pos;
+ unsigned cur_len;
+ unsigned words_per_32bit;
+ unsigned bytes_per_word;
+ unsigned curr_dma_words;
+ unsigned cur_direction;
+
+ unsigned cur_rx_pos;
+ unsigned cur_tx_pos;
+
+ unsigned dma_buf_size;
+ unsigned max_buf_size;
+ bool is_curr_dma_xfer;
+
+ struct completion rx_dma_complete;
+ struct completion tx_dma_complete;
+
+ u32 tx_status;
+ u32 rx_status;
+ u32 status_reg;
+ bool is_packed;
+ u32 packed_size;
+
+ u32 command_reg;
+ u32 command2_reg;
+ u32 dma_control_reg;
+ u32 def_command_reg;
+ u32 def_command2_reg;
+
+ struct completion xfer_completion;
+ struct spi_transfer *curr_xfer;
+ struct dma_chan *rx_dma_chan;
+ u32 *rx_dma_buf;
+ dma_addr_t rx_dma_phys;
+ struct dma_async_tx_descriptor *rx_dma_desc;
+
+ struct dma_chan *tx_dma_chan;
+ u32 *tx_dma_buf;
+ dma_addr_t tx_dma_phys;
+ struct dma_async_tx_descriptor *tx_dma_desc;
+};
+
+static int tegra_slink_runtime_suspend(struct device *dev);
+static int tegra_slink_runtime_resume(struct device *dev);
+
+static inline u32 tegra_slink_readl(struct tegra_slink_data *tspi,
+ unsigned long reg)
+{
+ return readl(tspi->base + reg);
+}
+
+static inline void tegra_slink_writel(struct tegra_slink_data *tspi,
+ u32 val, unsigned long reg)
+{
+ writel(val, tspi->base + reg);
+
+ /* Read back register to make sure that register writes completed */
+ if (reg != SLINK_TX_FIFO)
+ readl(tspi->base + SLINK_MAS_DATA);
+}
+
+static void tegra_slink_clear_status(struct tegra_slink_data *tspi)
+{
+ u32 val_write;
+
+ tegra_slink_readl(tspi, SLINK_STATUS);
+
+ /* Write 1 to clear status register */
+ val_write = SLINK_RDY | SLINK_FIFO_ERROR;
+ tegra_slink_writel(tspi, val_write, SLINK_STATUS);
+}
+
+static u32 tegra_slink_get_packed_size(struct tegra_slink_data *tspi,
+ struct spi_transfer *t)
+{
+ switch (tspi->bytes_per_word) {
+ case 0:
+ return SLINK_PACK_SIZE_4;
+ case 1:
+ return SLINK_PACK_SIZE_8;
+ case 2:
+ return SLINK_PACK_SIZE_16;
+ case 4:
+ return SLINK_PACK_SIZE_32;
+ default:
+ return 0;
+ }
+}
+
+static unsigned tegra_slink_calculate_curr_xfer_param(
+ struct spi_device *spi, struct tegra_slink_data *tspi,
+ struct spi_transfer *t)
+{
+ unsigned remain_len = t->len - tspi->cur_pos;
+ unsigned max_word;
+ unsigned bits_per_word;
+ unsigned max_len;
+ unsigned total_fifo_words;
+
+ bits_per_word = t->bits_per_word;
+ tspi->bytes_per_word = DIV_ROUND_UP(bits_per_word, 8);
+
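+ /*
+ * Only 8- and 16-bit words divide a 32-bit FIFO entry evenly, so
+ * packed mode is restricted to those widths; other word sizes use
+ * one FIFO entry per word.
+ */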
+ if (bits_per_word == 8 || bits_per_word == 16) {
+ tspi->is_packed = 1;
+ tspi->words_per_32bit = 32/bits_per_word;
+ } else {
+ tspi->is_packed = 0;
+ tspi->words_per_32bit = 1;
+ }
+ tspi->packed_size = tegra_slink_get_packed_size(tspi, t);
+
+ if (tspi->is_packed) {
+ max_len = min(remain_len, tspi->max_buf_size);
+ tspi->curr_dma_words = max_len/tspi->bytes_per_word;
+ total_fifo_words = max_len/4;
+ } else {
+ max_word = (remain_len - 1) / tspi->bytes_per_word + 1;
+ max_word = min(max_word, tspi->max_buf_size/4);
+ tspi->curr_dma_words = max_word;
+ total_fifo_words = max_word;
+ }
+ return total_fifo_words;
+}
+
+static unsigned tegra_slink_fill_tx_fifo_from_client_txbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned nbytes;
+ unsigned tx_empty_count;
+ u32 fifo_status;
+ unsigned max_n_32bit;
+ unsigned i, count;
+ unsigned int written_words;
+ unsigned fifo_words_left;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+
+ fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
+ tx_empty_count = SLINK_TX_FIFO_EMPTY_COUNT(fifo_status);
+
+ if (tspi->is_packed) {
+ fifo_words_left = tx_empty_count * tspi->words_per_32bit;
+ written_words = min(fifo_words_left, tspi->curr_dma_words);
+ nbytes = written_words * tspi->bytes_per_word;
+ max_n_32bit = DIV_ROUND_UP(nbytes, 4);
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+ for (i = 0; (i < 4) && nbytes; i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ } else {
+ max_n_32bit = min(tspi->curr_dma_words, tx_empty_count);
+ written_words = max_n_32bit;
+ nbytes = written_words * tspi->bytes_per_word;
+ for (count = 0; count < max_n_32bit; count++) {
+ u32 x = 0;
+ for (i = 0; nbytes && (i < tspi->bytes_per_word);
+ i++, nbytes--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tegra_slink_writel(tspi, x, SLINK_TX_FIFO);
+ }
+ }
+ tspi->cur_tx_pos += written_words * tspi->bytes_per_word;
+ return written_words;
+}
+
+static unsigned int tegra_slink_read_rx_fifo_to_client_rxbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned rx_full_count;
+ u32 fifo_status;
+ unsigned i, count;
+ unsigned int read_words = 0;
+ unsigned len;
+ u8 *rx_buf = (u8 *)t->rx_buf + tspi->cur_rx_pos;
+
+ fifo_status = tegra_slink_readl(tspi, SLINK_STATUS2);
+ rx_full_count = SLINK_RX_FIFO_FULL_COUNT(fifo_status);
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; len && (i < 4); i++, len--)
+ *rx_buf++ = (x >> (i * 8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+ read_words += tspi->curr_dma_words;
+ } else {
+ for (count = 0; count < rx_full_count; count++) {
+ u32 x = tegra_slink_readl(tspi, SLINK_RX_FIFO);
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ tspi->cur_rx_pos += rx_full_count * tspi->bytes_per_word;
+ read_words += rx_full_count;
+ }
+ return read_words;
+}
+
+static void tegra_slink_copy_client_txbuf_to_spi_txbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ /* Make the DMA buffer visible to the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+
+ if (tspi->is_packed) {
+ unsigned len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(tspi->tx_dma_buf, t->tx_buf + tspi->cur_pos, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ u8 *tx_buf = (u8 *)t->tx_buf + tspi->cur_tx_pos;
+ unsigned consume = tspi->curr_dma_words * tspi->bytes_per_word;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = 0;
+ for (i = 0; consume && (i < tspi->bytes_per_word);
+ i++, consume--)
+ x |= (u32)(*tx_buf++) << (i * 8);
+ tspi->tx_dma_buf[count] = x;
+ }
+ }
+ tspi->cur_tx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Hand the DMA buffer back to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->tx_dma_phys,
+ tspi->dma_buf_size, DMA_TO_DEVICE);
+}
+
+static void tegra_slink_copy_spi_rxbuf_to_client_rxbuf(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ unsigned len;
+
+ /* Make the DMA buffer visible to the CPU */
+ dma_sync_single_for_cpu(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ if (tspi->is_packed) {
+ len = tspi->curr_dma_words * tspi->bytes_per_word;
+ memcpy(t->rx_buf + tspi->cur_rx_pos, tspi->rx_dma_buf, len);
+ } else {
+ unsigned int i;
+ unsigned int count;
+ unsigned char *rx_buf = t->rx_buf + tspi->cur_rx_pos;
+ u32 rx_mask = ((u32)1 << t->bits_per_word) - 1;
+
+ for (count = 0; count < tspi->curr_dma_words; count++) {
+ u32 x = tspi->rx_dma_buf[count] & rx_mask;
+ for (i = 0; (i < tspi->bytes_per_word); i++)
+ *rx_buf++ = (x >> (i*8)) & 0xFF;
+ }
+ }
+ tspi->cur_rx_pos += tspi->curr_dma_words * tspi->bytes_per_word;
+
+ /* Hand the DMA buffer back to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+}
+
+static void tegra_slink_dma_complete(void *args)
+{
+ struct completion *dma_complete = args;
+
+ complete(dma_complete);
+}
+
+static int tegra_slink_start_tx_dma(struct tegra_slink_data *tspi, int len)
+{
+ reinit_completion(&tspi->tx_dma_complete);
+ tspi->tx_dma_desc = dmaengine_prep_slave_single(tspi->tx_dma_chan,
+ tspi->tx_dma_phys, len, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->tx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Tx\n");
+ return -EIO;
+ }
+
+ tspi->tx_dma_desc->callback = tegra_slink_dma_complete;
+ tspi->tx_dma_desc->callback_param = &tspi->tx_dma_complete;
+
+ dmaengine_submit(tspi->tx_dma_desc);
+ dma_async_issue_pending(tspi->tx_dma_chan);
+ return 0;
+}
+
+static int tegra_slink_start_rx_dma(struct tegra_slink_data *tspi, int len)
+{
+ reinit_completion(&tspi->rx_dma_complete);
+ tspi->rx_dma_desc = dmaengine_prep_slave_single(tspi->rx_dma_chan,
+ tspi->rx_dma_phys, len, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!tspi->rx_dma_desc) {
+ dev_err(tspi->dev, "Not able to get desc for Rx\n");
+ return -EIO;
+ }
+
+ tspi->rx_dma_desc->callback = tegra_slink_dma_complete;
+ tspi->rx_dma_desc->callback_param = &tspi->rx_dma_complete;
+
+ dmaengine_submit(tspi->rx_dma_desc);
+ dma_async_issue_pending(tspi->rx_dma_chan);
+ return 0;
+}
+
+static int tegra_slink_start_dma_based_transfer(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned int len;
+ int ret = 0;
+ u32 status;
+
+ /* Make sure that the Rx and Tx FIFOs are empty */
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ if ((status & SLINK_FIFO_EMPTY) != SLINK_FIFO_EMPTY) {
+ dev_err(tspi->dev, "Rx/Tx fifo are not empty status 0x%08x\n",
+ (unsigned)status);
+ return -EIO;
+ }
+
+ val = SLINK_DMA_BLOCK_SIZE(tspi->curr_dma_words - 1);
+ val |= tspi->packed_size;
+ if (tspi->is_packed)
+ len = DIV_ROUND_UP(tspi->curr_dma_words * tspi->bytes_per_word,
+ 4) * 4;
+ else
+ len = tspi->curr_dma_words * 4;
+
+ /* Set attention level based on length of transfer */
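+ /*
+ * The burst size must divide the transfer: use one-word triggers
+ * for lengths that are not a multiple of 16 bytes, four-word bursts
+ * for a 16-byte remainder, and eight-word bursts for 32-byte
+ * multiples.
+ */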
+ if (len & 0xF)
+ val |= SLINK_TX_TRIG_1 | SLINK_RX_TRIG_1;
+ else if (((len) >> 4) & 0x1)
+ val |= SLINK_TX_TRIG_4 | SLINK_RX_TRIG_4;
+ else
+ val |= SLINK_TX_TRIG_8 | SLINK_RX_TRIG_8;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ tegra_slink_copy_client_txbuf_to_spi_txbuf(tspi, t);
+ wmb();
+ ret = tegra_slink_start_tx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting tx dma failed, err %d\n", ret);
+ return ret;
+ }
+
+ /* Wait for the TX FIFO to fill before starting the SLINK */
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ while (!(status & SLINK_TX_FULL))
+ status = tegra_slink_readl(tspi, SLINK_STATUS);
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ /* Hand the DMA buffer back to the device */
+ dma_sync_single_for_device(tspi->dev, tspi->rx_dma_phys,
+ tspi->dma_buf_size, DMA_FROM_DEVICE);
+
+ ret = tegra_slink_start_rx_dma(tspi, len);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "Starting rx dma failed, err %d\n", ret);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ return ret;
+ }
+ }
+ tspi->is_curr_dma_xfer = true;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ /* HW needs a small delay after setting packed mode */
+ udelay(1);
+ }
+ tspi->dma_control_reg = val;
+
+ val |= SLINK_DMA_EN;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ return ret;
+}
+
+static int tegra_slink_start_cpu_based_transfer(
+ struct tegra_slink_data *tspi, struct spi_transfer *t)
+{
+ u32 val;
+ unsigned cur_words;
+
+ val = tspi->packed_size;
+ if (tspi->cur_direction & DATA_DIR_TX)
+ val |= SLINK_IE_TXC;
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ val |= SLINK_IE_RXC;
+
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ cur_words = tegra_slink_fill_tx_fifo_from_client_txbuf(tspi, t);
+ else
+ cur_words = tspi->curr_dma_words;
+ val |= SLINK_DMA_BLOCK_SIZE(cur_words - 1);
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ tspi->dma_control_reg = val;
+
+ tspi->is_curr_dma_xfer = false;
+ if (tspi->is_packed) {
+ val |= SLINK_PACKED;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ udelay(1);
+ wmb();
+ }
+ tspi->dma_control_reg = val;
+ val |= SLINK_DMA_EN;
+ tegra_slink_writel(tspi, val, SLINK_DMA_CTL);
+ return 0;
+}
+
+static int tegra_slink_init_dma_param(struct tegra_slink_data *tspi,
+ bool dma_to_memory)
+{
+ struct dma_chan *dma_chan;
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ int ret;
+ struct dma_slave_config dma_sconfig;
+
+ dma_chan = dma_request_slave_channel_reason(tspi->dev,
+ dma_to_memory ? "rx" : "tx");
+ if (IS_ERR(dma_chan)) {
+ ret = PTR_ERR(dma_chan);
+ if (ret != -EPROBE_DEFER)
+ dev_err(tspi->dev,
+ "Dma channel is not available: %d\n", ret);
+ return ret;
+ }
+
+ dma_buf = dma_alloc_coherent(tspi->dev, tspi->dma_buf_size,
+ &dma_phys, GFP_KERNEL);
+ if (!dma_buf) {
+ dev_err(tspi->dev, " Not able to allocate the dma buffer\n");
+ dma_release_channel(dma_chan);
+ return -ENOMEM;
+ }
+
+ if (dma_to_memory) {
+ dma_sconfig.src_addr = tspi->phys + SLINK_RX_FIFO;
+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.src_maxburst = 0;
+ } else {
+ dma_sconfig.dst_addr = tspi->phys + SLINK_TX_FIFO;
+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
+ dma_sconfig.dst_maxburst = 0;
+ }
+
+ ret = dmaengine_slave_config(dma_chan, &dma_sconfig);
+ if (ret)
+ goto scrub;
+ if (dma_to_memory) {
+ tspi->rx_dma_chan = dma_chan;
+ tspi->rx_dma_buf = dma_buf;
+ tspi->rx_dma_phys = dma_phys;
+ } else {
+ tspi->tx_dma_chan = dma_chan;
+ tspi->tx_dma_buf = dma_buf;
+ tspi->tx_dma_phys = dma_phys;
+ }
+ return 0;
+
+scrub:
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+ return ret;
+}
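+
+/*
+ * Bring-up pattern above: request a dmaengine slave channel, back it
+ * with a coherent bounce buffer of dma_buf_size bytes, then program
+ * 32-bit FIFO accesses at SLINK_RX_FIFO/SLINK_TX_FIFO; the scrub path
+ * unwinds both allocations if the slave config fails.
+ */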
+
+static void tegra_slink_deinit_dma_param(struct tegra_slink_data *tspi,
+ bool dma_to_memory)
+{
+ u32 *dma_buf;
+ dma_addr_t dma_phys;
+ struct dma_chan *dma_chan;
+
+ if (dma_to_memory) {
+ dma_buf = tspi->rx_dma_buf;
+ dma_chan = tspi->rx_dma_chan;
+ dma_phys = tspi->rx_dma_phys;
+ tspi->rx_dma_chan = NULL;
+ tspi->rx_dma_buf = NULL;
+ } else {
+ dma_buf = tspi->tx_dma_buf;
+ dma_chan = tspi->tx_dma_chan;
+ dma_phys = tspi->tx_dma_phys;
+ tspi->tx_dma_buf = NULL;
+ tspi->tx_dma_chan = NULL;
+ }
+ if (!dma_chan)
+ return;
+
+ dma_free_coherent(tspi->dev, tspi->dma_buf_size, dma_buf, dma_phys);
+ dma_release_channel(dma_chan);
+}
+
+static int tegra_slink_start_transfer_one(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ u32 speed;
+ u8 bits_per_word;
+ unsigned total_fifo_words;
+ int ret;
+ u32 command;
+ u32 command2;
+
+ bits_per_word = t->bits_per_word;
+ speed = t->speed_hz;
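+ /*
+ * The bit clock is derived from the controller source clock; a
+ * divide-by-four is assumed here, which is why four times the bus
+ * rate is requested below.
+ */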
+ if (speed != tspi->cur_speed) {
+ clk_set_rate(tspi->clk, speed * 4);
+ tspi->cur_speed = speed;
+ }
+
+ tspi->cur_spi = spi;
+ tspi->cur_pos = 0;
+ tspi->cur_rx_pos = 0;
+ tspi->cur_tx_pos = 0;
+ tspi->curr_xfer = t;
+ total_fifo_words = tegra_slink_calculate_curr_xfer_param(spi, tspi, t);
+
+ command = tspi->command_reg;
+ command &= ~SLINK_BIT_LENGTH(~0);
+ command |= SLINK_BIT_LENGTH(bits_per_word - 1);
+
+ command2 = tspi->command2_reg;
+ command2 &= ~(SLINK_RXEN | SLINK_TXEN);
+
+ tegra_slink_writel(tspi, command, SLINK_COMMAND);
+ tspi->command_reg = command;
+
+ tspi->cur_direction = 0;
+ if (t->rx_buf) {
+ command2 |= SLINK_RXEN;
+ tspi->cur_direction |= DATA_DIR_RX;
+ }
+ if (t->tx_buf) {
+ command2 |= SLINK_TXEN;
+ tspi->cur_direction |= DATA_DIR_TX;
+ }
+ tegra_slink_writel(tspi, command2, SLINK_COMMAND2);
+ tspi->command2_reg = command2;
+
+ if (total_fifo_words > SLINK_FIFO_DEPTH)
+ ret = tegra_slink_start_dma_based_transfer(tspi, t);
+ else
+ ret = tegra_slink_start_cpu_based_transfer(tspi, t);
+ return ret;
+}
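+
+/*
+ * Dispatch rule used above: a transfer that fits in the FIFO is done
+ * by PIO in tegra_slink_start_cpu_based_transfer(); anything larger
+ * than SLINK_FIFO_DEPTH words is bounced through the DMA buffers set
+ * up by tegra_slink_init_dma_param().
+ */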
+
+static int tegra_slink_setup(struct spi_device *spi)
+{
+ static const u32 cs_pol_bit[MAX_CHIP_SELECT] = {
+ SLINK_CS_POLARITY,
+ SLINK_CS_POLARITY1,
+ SLINK_CS_POLARITY2,
+ SLINK_CS_POLARITY3,
+ };
+
+ struct tegra_slink_data *tspi = spi_master_get_devdata(spi->master);
+ u32 val;
+ unsigned long flags;
+ int ret;
+
+ dev_dbg(&spi->dev, "setup %d bpw, %scpol, %scpha, %dHz\n",
+ spi->bits_per_word,
+ spi->mode & SPI_CPOL ? "" : "~",
+ spi->mode & SPI_CPHA ? "" : "~",
+ spi->max_speed_hz);
+
+ ret = pm_runtime_get_sync(tspi->dev);
+ if (ret < 0) {
+ dev_err(tspi->dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ val = tspi->def_command_reg;
+ if (spi->mode & SPI_CS_HIGH)
+ val |= cs_pol_bit[spi->chip_select];
+ else
+ val &= ~cs_pol_bit[spi->chip_select];
+ tspi->def_command_reg = val;
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+
+ pm_runtime_put(tspi->dev);
+ return 0;
+}
+
+static int tegra_slink_prepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ struct spi_device *spi = msg->spi;
+
+ tegra_slink_clear_status(tspi);
+
+ tspi->command_reg = tspi->def_command_reg;
+ tspi->command_reg |= SLINK_CS_SW | SLINK_CS_VALUE;
+
+ tspi->command2_reg = tspi->def_command2_reg;
+ tspi->command2_reg |= SLINK_SS_EN_CS(spi->chip_select);
+
+ tspi->command_reg &= ~SLINK_MODES;
+ if (spi->mode & SPI_CPHA)
+ tspi->command_reg |= SLINK_CK_SDA;
+
+ if (spi->mode & SPI_CPOL)
+ tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_HIGH;
+ else
+ tspi->command_reg |= SLINK_IDLE_SCLK_DRIVE_LOW;
+
+ return 0;
+}
+
+static int tegra_slink_transfer_one(struct spi_master *master,
+ struct spi_device *spi,
+ struct spi_transfer *xfer)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ reinit_completion(&tspi->xfer_completion);
+ ret = tegra_slink_start_transfer_one(spi, xfer);
+ if (ret < 0) {
+ dev_err(tspi->dev,
+ "spi can not start transfer, err %d\n", ret);
+ return ret;
+ }
+
+ ret = wait_for_completion_timeout(&tspi->xfer_completion,
+ SLINK_DMA_TIMEOUT);
+ if (WARN_ON(ret == 0)) {
+ dev_err(tspi->dev,
+ "spi trasfer timeout, err %d\n", ret);
+ return -EIO;
+ }
+
+ if (tspi->tx_status)
+ return tspi->tx_status;
+ if (tspi->rx_status)
+ return tspi->rx_status;
+
+ return 0;
+}
+
+static int tegra_slink_unprepare_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+
+ return 0;
+}
+
+static irqreturn_t handle_cpu_based_xfer(struct tegra_slink_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (tspi->tx_status || tspi->rx_status ||
+ (tspi->status_reg & SLINK_BSY)) {
+ dev_err(tspi->dev,
+ "CpuXfer ERROR bit set 0x%x\n", tspi->status_reg);
+ dev_err(tspi->dev,
+ "CpuXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
+ tspi->command2_reg, tspi->dma_control_reg);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_slink_read_rx_fifo_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ tegra_slink_calculate_curr_xfer_param(tspi->cur_spi, tspi, t);
+ tegra_slink_start_cpu_based_transfer(tspi, t);
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t handle_dma_based_xfer(struct tegra_slink_data *tspi)
+{
+ struct spi_transfer *t = tspi->curr_xfer;
+ long wait_status;
+ int err = 0;
+ unsigned total_fifo_words;
+ unsigned long flags;
+
+ /* Abort dmas if any error */
+ if (tspi->cur_direction & DATA_DIR_TX) {
+ if (tspi->tx_status) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ err += 1;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->tx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->tx_dma_chan);
+ dev_err(tspi->dev, "TxDma Xfer failed\n");
+ err += 1;
+ }
+ }
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX) {
+ if (tspi->rx_status) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ err += 2;
+ } else {
+ wait_status = wait_for_completion_interruptible_timeout(
+ &tspi->rx_dma_complete, SLINK_DMA_TIMEOUT);
+ if (wait_status <= 0) {
+ dmaengine_terminate_all(tspi->rx_dma_chan);
+ dev_err(tspi->dev, "RxDma Xfer failed\n");
+ err += 2;
+ }
+ }
+ }
+
+ spin_lock_irqsave(&tspi->lock, flags);
+ if (err) {
+ dev_err(tspi->dev,
+ "DmaXfer: ERROR bit set 0x%x\n", tspi->status_reg);
+ dev_err(tspi->dev,
+ "DmaXfer 0x%08x:0x%08x:0x%08x\n", tspi->command_reg,
+ tspi->command2_reg, tspi->dma_control_reg);
+ reset_control_assert(tspi->rst);
+ udelay(2);
+ reset_control_deassert(tspi->rst);
+ complete(&tspi->xfer_completion);
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+ }
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tegra_slink_copy_spi_rxbuf_to_client_rxbuf(tspi, t);
+
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->cur_pos = tspi->cur_tx_pos;
+ else
+ tspi->cur_pos = tspi->cur_rx_pos;
+
+ if (tspi->cur_pos == t->len) {
+ complete(&tspi->xfer_completion);
+ goto exit;
+ }
+
+ /* Continue transfer in current message */
+ total_fifo_words = tegra_slink_calculate_curr_xfer_param(tspi->cur_spi,
+ tspi, t);
+ if (total_fifo_words > SLINK_FIFO_DEPTH)
+ err = tegra_slink_start_dma_based_transfer(tspi, t);
+ else
+ err = tegra_slink_start_cpu_based_transfer(tspi, t);
+
+exit:
+ spin_unlock_irqrestore(&tspi->lock, flags);
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t tegra_slink_isr_thread(int irq, void *context_data)
+{
+ struct tegra_slink_data *tspi = context_data;
+
+ if (!tspi->is_curr_dma_xfer)
+ return handle_cpu_based_xfer(tspi);
+ return handle_dma_based_xfer(tspi);
+}
+
+static irqreturn_t tegra_slink_isr(int irq, void *context_data)
+{
+ struct tegra_slink_data *tspi = context_data;
+
+ tspi->status_reg = tegra_slink_readl(tspi, SLINK_STATUS);
+ if (tspi->cur_direction & DATA_DIR_TX)
+ tspi->tx_status = tspi->status_reg &
+ (SLINK_TX_OVF | SLINK_TX_UNF);
+
+ if (tspi->cur_direction & DATA_DIR_RX)
+ tspi->rx_status = tspi->status_reg &
+ (SLINK_RX_OVF | SLINK_RX_UNF);
+ tegra_slink_clear_status(tspi);
+
+ return IRQ_WAKE_THREAD;
+}
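+
+/*
+ * Threaded-IRQ split: the hard handler above snapshots SLINK_STATUS,
+ * latches the TX/RX error bits and clears the status, then
+ * IRQ_WAKE_THREAD hands off to tegra_slink_isr_thread() to drain the
+ * FIFOs or finish the DMA transfer.
+ */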
+
+static const struct tegra_slink_chip_data tegra30_spi_cdata = {
+ .cs_hold_time = true,
+};
+
+static const struct tegra_slink_chip_data tegra20_spi_cdata = {
+ .cs_hold_time = false,
+};
+
+static const struct of_device_id tegra_slink_of_match[] = {
+ { .compatible = "nvidia,tegra30-slink", .data = &tegra30_spi_cdata, },
+ { .compatible = "nvidia,tegra20-slink", .data = &tegra20_spi_cdata, },
+ {}
+};
+MODULE_DEVICE_TABLE(of, tegra_slink_of_match);
+
+static int tegra_slink_probe(struct platform_device *pdev)
+{
+ struct spi_master *master;
+ struct tegra_slink_data *tspi;
+ struct resource *r;
+ int ret, spi_irq;
+ const struct tegra_slink_chip_data *cdata = NULL;
+ const struct of_device_id *match;
+
+ match = of_match_device(tegra_slink_of_match, &pdev->dev);
+ if (!match) {
+ dev_err(&pdev->dev, "Error: No device match found\n");
+ return -ENODEV;
+ }
+ cdata = match->data;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*tspi));
+ if (!master) {
+ dev_err(&pdev->dev, "master allocation failed\n");
+ return -ENOMEM;
+ }
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
+ master->setup = tegra_slink_setup;
+ master->prepare_message = tegra_slink_prepare_message;
+ master->transfer_one = tegra_slink_transfer_one;
+ master->unprepare_message = tegra_slink_unprepare_message;
+ master->auto_runtime_pm = true;
+ master->num_chipselect = MAX_CHIP_SELECT;
+
+ platform_set_drvdata(pdev, master);
+ tspi = spi_master_get_devdata(master);
+ tspi->master = master;
+ tspi->dev = &pdev->dev;
+ tspi->chip_data = cdata;
+ spin_lock_init(&tspi->lock);
+
+ if (of_property_read_u32(tspi->dev->of_node, "spi-max-frequency",
+ &master->max_speed_hz))
+ master->max_speed_hz = 25000000; /* 25MHz */
+
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!r) {
+ dev_err(&pdev->dev, "No IO memory resource\n");
+ ret = -ENODEV;
+ goto exit_free_master;
+ }
+ tspi->phys = r->start;
+ tspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(tspi->base)) {
+ ret = PTR_ERR(tspi->base);
+ goto exit_free_master;
+ }
+
+ spi_irq = platform_get_irq(pdev, 0);
+ tspi->irq = spi_irq;
+ ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
+ tegra_slink_isr_thread, IRQF_ONESHOT,
+ dev_name(&pdev->dev), tspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ tspi->irq);
+ goto exit_free_master;
+ }
+
+ tspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tspi->clk)) {
+ dev_err(&pdev->dev, "can not get clock\n");
+ ret = PTR_ERR(tspi->clk);
+ goto exit_free_irq;
+ }
+
+ tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
+ if (IS_ERR(tspi->rst)) {
+ dev_err(&pdev->dev, "can not get reset\n");
+ ret = PTR_ERR(tspi->rst);
+ goto exit_free_irq;
+ }
+
+ tspi->max_buf_size = SLINK_FIFO_DEPTH << 2;
+ tspi->dma_buf_size = DEFAULT_SPI_DMA_BUF_LEN;
+
+ ret = tegra_slink_init_dma_param(tspi, true);
+ if (ret < 0)
+ goto exit_free_irq;
+ ret = tegra_slink_init_dma_param(tspi, false);
+ if (ret < 0)
+ goto exit_rx_dma_free;
+ tspi->max_buf_size = tspi->dma_buf_size;
+ init_completion(&tspi->tx_dma_complete);
+ init_completion(&tspi->rx_dma_complete);
+
+ init_completion(&tspi->xfer_completion);
+
+ pm_runtime_enable(&pdev->dev);
+ if (!pm_runtime_enabled(&pdev->dev)) {
+ ret = tegra_slink_runtime_resume(&pdev->dev);
+ if (ret)
+ goto exit_pm_disable;
+ }
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "pm runtime get failed, e = %d\n", ret);
+ goto exit_pm_disable;
+ }
+ tspi->def_command_reg = SLINK_M_S;
+ tspi->def_command2_reg = SLINK_CS_ACTIVE_BETWEEN;
+ tegra_slink_writel(tspi, tspi->def_command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->def_command2_reg, SLINK_COMMAND2);
+ pm_runtime_put(&pdev->dev);
+
+ master->dev.of_node = pdev->dev.of_node;
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "can not register to master err %d\n", ret);
+ goto exit_pm_disable;
+ }
+ return ret;
+
+exit_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_slink_runtime_suspend(&pdev->dev);
+ tegra_slink_deinit_dma_param(tspi, false);
+exit_rx_dma_free:
+ tegra_slink_deinit_dma_param(tspi, true);
+exit_free_irq:
+ free_irq(spi_irq, tspi);
+exit_free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int tegra_slink_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ free_irq(tspi->irq, tspi);
+
+ if (tspi->tx_dma_chan)
+ tegra_slink_deinit_dma_param(tspi, false);
+
+ if (tspi->rx_dma_chan)
+ tegra_slink_deinit_dma_param(tspi, true);
+
+ pm_runtime_disable(&pdev->dev);
+ if (!pm_runtime_status_suspended(&pdev->dev))
+ tegra_slink_runtime_suspend(&pdev->dev);
+
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int tegra_slink_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+
+ return spi_master_suspend(master);
+}
+
+static int tegra_slink_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = pm_runtime_get_sync(dev);
+ if (ret < 0) {
+ dev_err(dev, "pm runtime failed, e = %d\n", ret);
+ return ret;
+ }
+ tegra_slink_writel(tspi, tspi->command_reg, SLINK_COMMAND);
+ tegra_slink_writel(tspi, tspi->command2_reg, SLINK_COMMAND2);
+ pm_runtime_put(dev);
+
+ return spi_master_resume(master);
+}
+#endif
+
+static int tegra_slink_runtime_suspend(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+
+ /* Flush all write which are in PPSB queue by reading back */
+ tegra_slink_readl(tspi, SLINK_MAS_DATA);
+
+ clk_disable_unprepare(tspi->clk);
+ return 0;
+}
+
+static int tegra_slink_runtime_resume(struct device *dev)
+{
+ struct spi_master *master = dev_get_drvdata(dev);
+ struct tegra_slink_data *tspi = spi_master_get_devdata(master);
+ int ret;
+
+ ret = clk_prepare_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(tspi->dev, "clk_prepare failed: %d\n", ret);
+ return ret;
+ }
+ return 0;
+}
+
+static const struct dev_pm_ops slink_pm_ops = {
+ SET_RUNTIME_PM_OPS(tegra_slink_runtime_suspend,
+ tegra_slink_runtime_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(tegra_slink_suspend, tegra_slink_resume)
+};
+static struct platform_driver tegra_slink_driver = {
+ .driver = {
+ .name = "spi-tegra-slink",
+ .pm = &slink_pm_ops,
+ .of_match_table = tegra_slink_of_match,
+ },
+ .probe = tegra_slink_probe,
+ .remove = tegra_slink_remove,
+};
+module_platform_driver(tegra_slink_driver);
+
+MODULE_ALIAS("platform:spi-tegra-slink");
+MODULE_DESCRIPTION("NVIDIA Tegra20/Tegra30 SLINK Controller Driver");
+MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/spi/spi-ti-qspi.c b/drivers/spi/spi-ti-qspi.c
new file mode 100644
index 000000000..5c0616870
--- /dev/null
+++ b/drivers/spi/spi-ti-qspi.c
@@ -0,0 +1,600 @@
+/*
+ * TI QSPI driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Sourav Poddar <sourav.poddar@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GPLv2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/omap-dma.h>
+#include <linux/platform_device.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <linux/spi/spi.h>
+
+struct ti_qspi_regs {
+ u32 clkctrl;
+};
+
+struct ti_qspi {
+ struct completion transfer_complete;
+
+ /* list synchronization */
+ struct mutex list_lock;
+
+ struct spi_master *master;
+ void __iomem *base;
+ void __iomem *ctrl_base;
+ void __iomem *mmap_base;
+ struct clk *fclk;
+ struct device *dev;
+
+ struct ti_qspi_regs ctx_reg;
+
+ u32 spi_max_frequency;
+ u32 cmd;
+ u32 dc;
+
+ bool ctrl_mod;
+};
+
+#define QSPI_PID (0x0)
+#define QSPI_SYSCONFIG (0x10)
+#define QSPI_INTR_STATUS_RAW_SET (0x20)
+#define QSPI_INTR_STATUS_ENABLED_CLEAR (0x24)
+#define QSPI_INTR_ENABLE_SET_REG (0x28)
+#define QSPI_INTR_ENABLE_CLEAR_REG (0x2c)
+#define QSPI_SPI_CLOCK_CNTRL_REG (0x40)
+#define QSPI_SPI_DC_REG (0x44)
+#define QSPI_SPI_CMD_REG (0x48)
+#define QSPI_SPI_STATUS_REG (0x4c)
+#define QSPI_SPI_DATA_REG (0x50)
+#define QSPI_SPI_SETUP0_REG (0x54)
+#define QSPI_SPI_SWITCH_REG (0x64)
+#define QSPI_SPI_SETUP1_REG (0x58)
+#define QSPI_SPI_SETUP2_REG (0x5c)
+#define QSPI_SPI_SETUP3_REG (0x60)
+#define QSPI_SPI_DATA_REG_1 (0x68)
+#define QSPI_SPI_DATA_REG_2 (0x6c)
+#define QSPI_SPI_DATA_REG_3 (0x70)
+
+#define QSPI_COMPLETION_TIMEOUT msecs_to_jiffies(2000)
+
+#define QSPI_FCLK 192000000
+
+/* Clock Control */
+#define QSPI_CLK_EN (1 << 31)
+#define QSPI_CLK_DIV_MAX 0xffff
+
+/* Command */
+#define QSPI_EN_CS(n) (n << 28)
+#define QSPI_WLEN(n) ((n - 1) << 19)
+#define QSPI_3_PIN (1 << 18)
+#define QSPI_RD_SNGL (1 << 16)
+#define QSPI_WR_SNGL (2 << 16)
+#define QSPI_RD_DUAL (3 << 16)
+#define QSPI_RD_QUAD (7 << 16)
+#define QSPI_INVAL (4 << 16)
+#define QSPI_WC_CMD_INT_EN (1 << 14)
+#define QSPI_FLEN(n) ((n - 1) << 0)
+
+/* STATUS REGISTER */
+#define BUSY 0x01
+#define WC 0x02
+
+/* INTERRUPT REGISTER */
+#define QSPI_WC_INT_EN (1 << 1)
+#define QSPI_WC_INT_DISABLE (1 << 1)
+
+/* Device Control */
+#define QSPI_DD(m, n) (m << (3 + n * 8))
+#define QSPI_CKPHA(n) (1 << (2 + n * 8))
+#define QSPI_CSPOL(n) (1 << (1 + n * 8))
+#define QSPI_CKPOL(n) (1 << (n * 8))
+
+#define QSPI_FRAME 4096
+
+#define QSPI_AUTOSUSPEND_TIMEOUT 2000
+
+static inline unsigned long ti_qspi_read(struct ti_qspi *qspi,
+ unsigned long reg)
+{
+ return readl(qspi->base + reg);
+}
+
+static inline void ti_qspi_write(struct ti_qspi *qspi,
+ unsigned long val, unsigned long reg)
+{
+ writel(val, qspi->base + reg);
+}
+
+static int ti_qspi_setup(struct spi_device *spi)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(spi->master);
+ struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
+ int clk_div = 0, ret;
+ u32 clk_ctrl_reg, clk_rate, clk_mask;
+
+ if (spi->master->busy) {
+ dev_dbg(qspi->dev, "master busy doing other trasnfers\n");
+ return -EBUSY;
+ }
+
+ if (!qspi->spi_max_frequency) {
+ dev_err(qspi->dev, "spi max frequency not defined\n");
+ return -EINVAL;
+ }
+
+ clk_rate = clk_get_rate(qspi->fclk);
+
+ clk_div = DIV_ROUND_UP(clk_rate, qspi->spi_max_frequency) - 1;
+
+ if (clk_div < 0) {
+ dev_dbg(qspi->dev, "clock divider < 0, using /1 divider\n");
+ return -EINVAL;
+ }
+
+ if (clk_div > QSPI_CLK_DIV_MAX) {
+ dev_dbg(qspi->dev, "clock divider >%d , using /%d divider\n",
+ QSPI_CLK_DIV_MAX, QSPI_CLK_DIV_MAX + 1);
+ return -EINVAL;
+ }
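+
+ /*
+ * Worked example, assuming SCLK = fclk / (clk_div + 1) to match the
+ * DIV_ROUND_UP() above: with fclk at 192 MHz and spi-max-frequency =
+ * 48000000, clk_div = DIV_ROUND_UP(192 MHz, 48 MHz) - 1 = 3, giving
+ * SCLK = 192 MHz / 4 = 48 MHz.
+ */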
+
+ dev_dbg(qspi->dev, "hz: %d, clock divider %d\n",
+ qspi->spi_max_frequency, clk_div);
+
+ ret = pm_runtime_get_sync(qspi->dev);
+ if (ret < 0) {
+ dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
+ return ret;
+ }
+
+ clk_ctrl_reg = ti_qspi_read(qspi, QSPI_SPI_CLOCK_CNTRL_REG);
+
+ clk_ctrl_reg &= ~QSPI_CLK_EN;
+
+ /* disable SCLK */
+ ti_qspi_write(qspi, clk_ctrl_reg, QSPI_SPI_CLOCK_CNTRL_REG);
+
+ /* enable SCLK */
+ clk_mask = QSPI_CLK_EN | clk_div;
+ ti_qspi_write(qspi, clk_mask, QSPI_SPI_CLOCK_CNTRL_REG);
+ ctx_reg->clkctrl = clk_mask;
+
+ pm_runtime_mark_last_busy(qspi->dev);
+ ret = pm_runtime_put_autosuspend(qspi->dev);
+ if (ret < 0) {
+ dev_err(qspi->dev, "pm_runtime_put_autosuspend() failed\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static void ti_qspi_restore_ctx(struct ti_qspi *qspi)
+{
+ struct ti_qspi_regs *ctx_reg = &qspi->ctx_reg;
+
+ ti_qspi_write(qspi, ctx_reg->clkctrl, QSPI_SPI_CLOCK_CNTRL_REG);
+}
+
+static inline u32 qspi_is_busy(struct ti_qspi *qspi)
+{
+ u32 stat;
+ unsigned long timeout = jiffies + QSPI_COMPLETION_TIMEOUT;
+
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ while ((stat & BUSY) && time_after(timeout, jiffies)) {
+ cpu_relax();
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+ }
+
+ WARN(stat & BUSY, "qspi busy\n");
+ return stat & BUSY;
+}
+
+static int qspi_write_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+{
+ int wlen, count;
+ unsigned int cmd;
+ const u8 *txbuf;
+
+ txbuf = t->tx_buf;
+ cmd = qspi->cmd | QSPI_WR_SNGL;
+ count = t->len;
+ wlen = t->bits_per_word >> 3; /* in bytes */
+
+ while (count) {
+ if (qspi_is_busy(qspi))
+ return -EBUSY;
+
+ switch (wlen) {
+ case 1:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %02x\n",
+ cmd, qspi->dc, *txbuf);
+ writeb(*txbuf, qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ case 2:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %04x\n",
+ cmd, qspi->dc, *txbuf);
+ writew(*((u16 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ case 4:
+ dev_dbg(qspi->dev, "tx cmd %08x dc %08x data %08x\n",
+ cmd, qspi->dc, *txbuf);
+ writel(*((u32 *)txbuf), qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ }
+
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ if (!wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT)) {
+ dev_err(qspi->dev, "write timed out\n");
+ return -ETIMEDOUT;
+ }
+ txbuf += wlen;
+ count -= wlen;
+ }
+
+ return 0;
+}
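+
+/*
+ * PIO write path above: each word (1, 2 or 4 bytes depending on
+ * t->bits_per_word) is loaded into the data register, the command is
+ * issued, and the word-complete interrupt (see ti_qspi_isr()) signals
+ * transfer_complete before the next word goes out.
+ */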
+
+static int qspi_read_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+{
+ int wlen, count;
+ unsigned int cmd;
+ u8 *rxbuf;
+
+ rxbuf = t->rx_buf;
+ cmd = qspi->cmd;
+ switch (t->rx_nbits) {
+ case SPI_NBITS_DUAL:
+ cmd |= QSPI_RD_DUAL;
+ break;
+ case SPI_NBITS_QUAD:
+ cmd |= QSPI_RD_QUAD;
+ break;
+ default:
+ cmd |= QSPI_RD_SNGL;
+ break;
+ }
+ count = t->len;
+ wlen = t->bits_per_word >> 3; /* in bytes */
+
+ while (count) {
+ dev_dbg(qspi->dev, "rx cmd %08x dc %08x\n", cmd, qspi->dc);
+ if (qspi_is_busy(qspi))
+ return -EBUSY;
+
+ ti_qspi_write(qspi, cmd, QSPI_SPI_CMD_REG);
+ if (!wait_for_completion_timeout(&qspi->transfer_complete,
+ QSPI_COMPLETION_TIMEOUT)) {
+ dev_err(qspi->dev, "read timed out\n");
+ return -ETIMEDOUT;
+ }
+ switch (wlen) {
+ case 1:
+ *rxbuf = readb(qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ case 2:
+ *((u16 *)rxbuf) = readw(qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ case 4:
+ *((u32 *)rxbuf) = readl(qspi->base + QSPI_SPI_DATA_REG);
+ break;
+ }
+ rxbuf += wlen;
+ count -= wlen;
+ }
+
+ return 0;
+}
+
+static int qspi_transfer_msg(struct ti_qspi *qspi, struct spi_transfer *t)
+{
+ int ret;
+
+ if (t->tx_buf) {
+ ret = qspi_write_msg(qspi, t);
+ if (ret) {
+ dev_dbg(qspi->dev, "Error while writing\n");
+ return ret;
+ }
+ }
+
+ if (t->rx_buf) {
+ ret = qspi_read_msg(qspi, t);
+ if (ret) {
+ dev_dbg(qspi->dev, "Error while reading\n");
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static int ti_qspi_start_transfer_one(struct spi_master *master,
+ struct spi_message *m)
+{
+ struct ti_qspi *qspi = spi_master_get_devdata(master);
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t;
+ int status = 0, ret;
+ int frame_length;
+
+ /* setup device control reg */
+ qspi->dc = 0;
+
+ if (spi->mode & SPI_CPHA)
+ qspi->dc |= QSPI_CKPHA(spi->chip_select);
+ if (spi->mode & SPI_CPOL)
+ qspi->dc |= QSPI_CKPOL(spi->chip_select);
+ if (spi->mode & SPI_CS_HIGH)
+ qspi->dc |= QSPI_CSPOL(spi->chip_select);
+
+ frame_length = (m->frame_length << 3) / spi->bits_per_word;
+
+ frame_length = clamp(frame_length, 0, QSPI_FRAME);
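+
+ /*
+ * frame_length is the message size converted from bytes to words:
+ * e.g. a 256-byte message at 8 bits per word is 256 words, well under
+ * the QSPI_FRAME (4096) cap; QSPI_FLEN() below programs
+ * frame_length - 1.
+ */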
+
+ /* setup command reg */
+ qspi->cmd = 0;
+ qspi->cmd |= QSPI_EN_CS(spi->chip_select);
+ qspi->cmd |= QSPI_FLEN(frame_length);
+ qspi->cmd |= QSPI_WC_CMD_INT_EN;
+
+ ti_qspi_write(qspi, QSPI_WC_INT_EN, QSPI_INTR_ENABLE_SET_REG);
+ ti_qspi_write(qspi, qspi->dc, QSPI_SPI_DC_REG);
+
+ mutex_lock(&qspi->list_lock);
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ qspi->cmd |= QSPI_WLEN(t->bits_per_word);
+
+ ret = qspi_transfer_msg(qspi, t);
+ if (ret) {
+ dev_dbg(qspi->dev, "transfer message failed\n");
+ mutex_unlock(&qspi->list_lock);
+ return -EINVAL;
+ }
+
+ m->actual_length += t->len;
+ }
+
+ mutex_unlock(&qspi->list_lock);
+
+ m->status = status;
+ spi_finalize_current_message(master);
+
+ ti_qspi_write(qspi, qspi->cmd | QSPI_INVAL, QSPI_SPI_CMD_REG);
+
+ return status;
+}
+
+static irqreturn_t ti_qspi_isr(int irq, void *dev_id)
+{
+ struct ti_qspi *qspi = dev_id;
+ u16 int_stat;
+ u32 stat;
+
+ irqreturn_t ret = IRQ_HANDLED;
+
+ int_stat = ti_qspi_read(qspi, QSPI_INTR_STATUS_ENABLED_CLEAR);
+ stat = ti_qspi_read(qspi, QSPI_SPI_STATUS_REG);
+
+ if (!int_stat) {
+ dev_dbg(qspi->dev, "No IRQ triggered\n");
+ ret = IRQ_NONE;
+ goto out;
+ }
+
+ ti_qspi_write(qspi, QSPI_WC_INT_DISABLE,
+ QSPI_INTR_STATUS_ENABLED_CLEAR);
+ if (stat & WC)
+ complete(&qspi->transfer_complete);
+out:
+ return ret;
+}
+
+static int ti_qspi_runtime_resume(struct device *dev)
+{
+ struct ti_qspi *qspi;
+
+ qspi = dev_get_drvdata(dev);
+ ti_qspi_restore_ctx(qspi);
+
+ return 0;
+}
+
+static const struct of_device_id ti_qspi_match[] = {
+ {.compatible = "ti,dra7xxx-qspi" },
+ {.compatible = "ti,am4372-qspi" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_qspi_match);
+
+static int ti_qspi_probe(struct platform_device *pdev)
+{
+ struct ti_qspi *qspi;
+ struct spi_master *master;
+ struct resource *r, *res_ctrl, *res_mmap;
+ struct device_node *np = pdev->dev.of_node;
+ u32 max_freq;
+ int ret = 0, num_cs, irq;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(*qspi));
+ if (!master)
+ return -ENOMEM;
+
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_RX_DUAL | SPI_RX_QUAD;
+
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->setup = ti_qspi_setup;
+ master->auto_runtime_pm = true;
+ master->transfer_one_message = ti_qspi_start_transfer_one;
+ master->dev.of_node = pdev->dev.of_node;
+ master->bits_per_word_mask = SPI_BPW_MASK(32) | SPI_BPW_MASK(16) |
+ SPI_BPW_MASK(8);
+
+ if (!of_property_read_u32(np, "num-cs", &num_cs))
+ master->num_chipselect = num_cs;
+
+ qspi = spi_master_get_devdata(master);
+ qspi->master = master;
+ qspi->dev = &pdev->dev;
+ platform_set_drvdata(pdev, qspi);
+
+ r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_base");
+ if (r == NULL) {
+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (r == NULL) {
+ dev_err(&pdev->dev, "missing platform data\n");
+ return -ENODEV;
+ }
+ }
+
+ res_mmap = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "qspi_mmap");
+ if (res_mmap == NULL) {
+ res_mmap = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+ if (res_mmap == NULL) {
+ dev_err(&pdev->dev,
+ "memory mapped resource not required\n");
+ }
+ }
+
+ res_ctrl = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, "qspi_ctrlmod");
+ if (res_ctrl == NULL) {
+ res_ctrl = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+ if (res_ctrl == NULL) {
+ dev_dbg(&pdev->dev,
+ "control module resources not required\n");
+ }
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ dev_err(&pdev->dev, "no irq resource?\n");
+ return irq;
+ }
+
+ mutex_init(&qspi->list_lock);
+
+ qspi->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(qspi->base)) {
+ ret = PTR_ERR(qspi->base);
+ goto free_master;
+ }
+
+ if (res_ctrl) {
+ qspi->ctrl_mod = true;
+ qspi->ctrl_base = devm_ioremap_resource(&pdev->dev, res_ctrl);
+ if (IS_ERR(qspi->ctrl_base)) {
+ ret = PTR_ERR(qspi->ctrl_base);
+ goto free_master;
+ }
+ }
+
+ if (res_mmap) {
+ qspi->mmap_base = devm_ioremap_resource(&pdev->dev, res_mmap);
+ if (IS_ERR(qspi->mmap_base)) {
+ ret = PTR_ERR(qspi->mmap_base);
+ goto free_master;
+ }
+ }
+
+ ret = devm_request_irq(&pdev->dev, irq, ti_qspi_isr, 0,
+ dev_name(&pdev->dev), qspi);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
+ irq);
+ goto free_master;
+ }
+
+ qspi->fclk = devm_clk_get(&pdev->dev, "fck");
+ if (IS_ERR(qspi->fclk)) {
+ ret = PTR_ERR(qspi->fclk);
+ dev_err(&pdev->dev, "could not get clk: %d\n", ret);
+ }
+
+ init_completion(&qspi->transfer_complete);
+
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, QSPI_AUTOSUSPEND_TIMEOUT);
+ pm_runtime_enable(&pdev->dev);
+
+ if (!of_property_read_u32(np, "spi-max-frequency", &max_freq))
+ qspi->spi_max_frequency = max_freq;
+
+ ret = devm_spi_register_master(&pdev->dev, master);
+ if (ret)
+ goto free_master;
+
+ return 0;
+
+free_master:
+ spi_master_put(master);
+ return ret;
+}
+
+static int ti_qspi_remove(struct platform_device *pdev)
+{
+ struct ti_qspi *qspi = platform_get_drvdata(pdev);
+ int ret;
+
+ ret = pm_runtime_get_sync(qspi->dev);
+ if (ret < 0) {
+ dev_err(qspi->dev, "pm_runtime_get_sync() failed\n");
+ return ret;
+ }
+
+ ti_qspi_write(qspi, QSPI_WC_INT_DISABLE, QSPI_INTR_ENABLE_CLEAR_REG);
+
+ pm_runtime_put(qspi->dev);
+ pm_runtime_disable(&pdev->dev);
+
+ return 0;
+}
+
+static const struct dev_pm_ops ti_qspi_pm_ops = {
+ .runtime_resume = ti_qspi_runtime_resume,
+};
+
+static struct platform_driver ti_qspi_driver = {
+ .probe = ti_qspi_probe,
+ .remove = ti_qspi_remove,
+ .driver = {
+ .name = "ti-qspi",
+ .pm = &ti_qspi_pm_ops,
+ .of_match_table = ti_qspi_match,
+ }
+};
+
+module_platform_driver(ti_qspi_driver);
+
+MODULE_AUTHOR("Sourav Poddar <sourav.poddar@ti.com>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("TI QSPI controller driver");
+MODULE_ALIAS("platform:ti-qspi");
diff --git a/drivers/spi/spi-tle62x0.c b/drivers/spi/spi-tle62x0.c
new file mode 100644
index 000000000..daf5aa1c2
--- /dev/null
+++ b/drivers/spi/spi-tle62x0.c
@@ -0,0 +1,321 @@
+/*
+ * Support Infineon TLE62x0 driver chips
+ *
+ * Copyright (c) 2007 Simtec Electronics
+ * Ben Dooks, <ben@simtec.co.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/tle62x0.h>
+
+
+#define CMD_READ 0x00
+#define CMD_SET 0xff
+
+#define DIAG_NORMAL 0x03
+#define DIAG_OVERLOAD 0x02
+#define DIAG_OPEN 0x01
+#define DIAG_SHORTGND 0x00
+
+struct tle62x0_state {
+ struct spi_device *us;
+ struct mutex lock;
+ unsigned int nr_gpio;
+ unsigned int gpio_state;
+
+ unsigned char tx_buff[4];
+ unsigned char rx_buff[4];
+};
+
+static int to_gpio_num(struct device_attribute *attr);
+
+static inline int tle62x0_write(struct tle62x0_state *st)
+{
+ unsigned char *buff = st->tx_buff;
+ unsigned int gpio_state = st->gpio_state;
+
+ buff[0] = CMD_SET;
+
+ if (st->nr_gpio == 16) {
+ buff[1] = gpio_state >> 8;
+ buff[2] = gpio_state;
+ } else {
+ buff[1] = gpio_state;
+ }
+
+ dev_dbg(&st->us->dev, "buff %3ph\n", buff);
+
+ return spi_write(st->us, buff, (st->nr_gpio == 16) ? 3 : 2);
+}
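+
+/*
+ * Wire format implied above: a SET frame is CMD_SET followed by the
+ * GPIO state, high byte first on 16-output parts, so 3 bytes go out
+ * for 16 outputs and 2 bytes for the smaller variants.
+ */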
+
+static inline int tle62x0_read(struct tle62x0_state *st)
+{
+ unsigned char *txbuff = st->tx_buff;
+ struct spi_transfer xfer = {
+ .tx_buf = txbuff,
+ .rx_buf = st->rx_buff,
+ .len = (st->nr_gpio * 2) / 8,
+ };
+ struct spi_message msg;
+
+ txbuff[0] = CMD_READ;
+ txbuff[1] = 0x00;
+ txbuff[2] = 0x00;
+ txbuff[3] = 0x00;
+
+ spi_message_init(&msg);
+ spi_message_add_tail(&xfer, &msg);
+
+ return spi_sync(st->us, &msg);
+}
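+
+/*
+ * The read reply carries two diagnosis bits per channel (decoded by
+ * decode_fault() below), hence the (nr_gpio * 2) / 8 byte transfer
+ * length: 4 bytes for a 16-output chip, 2 for an 8-output one.
+ */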
+
+static unsigned char *decode_fault(unsigned int fault_code)
+{
+ fault_code &= 3;
+
+ switch (fault_code) {
+ case DIAG_NORMAL:
+ return "N";
+ case DIAG_OVERLOAD:
+ return "V";
+ case DIAG_OPEN:
+ return "O";
+ case DIAG_SHORTGND:
+ return "G";
+ }
+
+ return "?";
+}
+
+static ssize_t tle62x0_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ char *bp = buf;
+ unsigned char *buff = st->rx_buff;
+ unsigned long fault = 0;
+ int ptr;
+ int ret;
+
+ mutex_lock(&st->lock);
+ ret = tle62x0_read(st);
+ dev_dbg(dev, "tle62x0_read() returned %d\n", ret);
+ if (ret < 0) {
+ mutex_unlock(&st->lock);
+ return ret;
+ }
+
+ for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) {
+ fault <<= 8;
+ fault |= ((unsigned long)buff[ptr]);
+
+ dev_dbg(dev, "byte %d is %02x\n", ptr, buff[ptr]);
+ }
+
+ for (ptr = 0; ptr < st->nr_gpio; ptr++) {
+ bp += sprintf(bp, "%s ", decode_fault(fault >> (ptr * 2)));
+ }
+
+ *bp++ = '\n';
+
+ mutex_unlock(&st->lock);
+ return bp - buf;
+}
+
+static DEVICE_ATTR(status_show, S_IRUGO, tle62x0_status_show, NULL);
+
+static ssize_t tle62x0_gpio_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ int gpio_num = to_gpio_num(attr);
+ int value;
+
+ mutex_lock(&st->lock);
+ value = (st->gpio_state >> gpio_num) & 1;
+ mutex_unlock(&st->lock);
+
+ return snprintf(buf, PAGE_SIZE, "%d", value);
+}
+
+static ssize_t tle62x0_gpio_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct tle62x0_state *st = dev_get_drvdata(dev);
+ int gpio_num = to_gpio_num(attr);
+ unsigned long val;
+ char *endp;
+
+ val = simple_strtoul(buf, &endp, 0);
+ if (buf == endp)
+ return -EINVAL;
+
+ dev_dbg(dev, "setting gpio %d to %ld\n", gpio_num, val);
+
+ mutex_lock(&st->lock);
+
+ if (val)
+ st->gpio_state |= 1 << gpio_num;
+ else
+ st->gpio_state &= ~(1 << gpio_num);
+
+ tle62x0_write(st);
+ mutex_unlock(&st->lock);
+
+ return len;
+}
+
+static DEVICE_ATTR(gpio1, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio2, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio3, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio4, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio5, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio6, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio7, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio8, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio9, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio10, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio11, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio12, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio13, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio14, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio15, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+static DEVICE_ATTR(gpio16, S_IWUSR|S_IRUGO,
+ tle62x0_gpio_show, tle62x0_gpio_store);
+
+static struct device_attribute *gpio_attrs[] = {
+ [0] = &dev_attr_gpio1,
+ [1] = &dev_attr_gpio2,
+ [2] = &dev_attr_gpio3,
+ [3] = &dev_attr_gpio4,
+ [4] = &dev_attr_gpio5,
+ [5] = &dev_attr_gpio6,
+ [6] = &dev_attr_gpio7,
+ [7] = &dev_attr_gpio8,
+ [8] = &dev_attr_gpio9,
+ [9] = &dev_attr_gpio10,
+ [10] = &dev_attr_gpio11,
+ [11] = &dev_attr_gpio12,
+ [12] = &dev_attr_gpio13,
+ [13] = &dev_attr_gpio14,
+ [14] = &dev_attr_gpio15,
+ [15] = &dev_attr_gpio16
+};
+
+static int to_gpio_num(struct device_attribute *attr)
+{
+ int ptr;
+
+ for (ptr = 0; ptr < ARRAY_SIZE(gpio_attrs); ptr++) {
+ if (gpio_attrs[ptr] == attr)
+ return ptr;
+ }
+
+ return -1;
+}
+
+static int tle62x0_probe(struct spi_device *spi)
+{
+ struct tle62x0_state *st;
+ struct tle62x0_pdata *pdata;
+ int ptr;
+ int ret;
+
+ pdata = dev_get_platdata(&spi->dev);
+ if (pdata == NULL) {
+ dev_err(&spi->dev, "no device data specified\n");
+ return -EINVAL;
+ }
+
+ st = kzalloc(sizeof(struct tle62x0_state), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+
+ st->us = spi;
+ st->nr_gpio = pdata->gpio_count;
+ st->gpio_state = pdata->init_state;
+
+ mutex_init(&st->lock);
+
+ ret = device_create_file(&spi->dev, &dev_attr_status_show);
+ if (ret) {
+ dev_err(&spi->dev, "cannot create status attribute\n");
+ goto err_status;
+ }
+
+ for (ptr = 0; ptr < pdata->gpio_count; ptr++) {
+ ret = device_create_file(&spi->dev, gpio_attrs[ptr]);
+ if (ret) {
+ dev_err(&spi->dev, "cannot create gpio attribute\n");
+ goto err_gpios;
+ }
+ }
+
+ /* tle62x0_write(st); */
+ spi_set_drvdata(spi, st);
+ return 0;
+
+ err_gpios:
+ while (--ptr >= 0)
+ device_remove_file(&spi->dev, gpio_attrs[ptr]);
+
+ device_remove_file(&spi->dev, &dev_attr_status_show);
+
+ err_status:
+ kfree(st);
+ return ret;
+}
+
+static int tle62x0_remove(struct spi_device *spi)
+{
+ struct tle62x0_state *st = spi_get_drvdata(spi);
+ int ptr;
+
+ for (ptr = 0; ptr < st->nr_gpio; ptr++)
+ device_remove_file(&spi->dev, gpio_attrs[ptr]);
+
+ device_remove_file(&spi->dev, &dev_attr_status_show);
+ kfree(st);
+ return 0;
+}
+
+static struct spi_driver tle62x0_driver = {
+ .driver = {
+ .name = "tle62x0",
+ .owner = THIS_MODULE,
+ },
+ .probe = tle62x0_probe,
+ .remove = tle62x0_remove,
+};
+
+module_spi_driver(tle62x0_driver);
+
+MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>");
+MODULE_DESCRIPTION("TLE62x0 SPI driver");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("spi:tle62x0");
diff --git a/drivers/spi/spi-topcliff-pch.c b/drivers/spi/spi-topcliff-pch.c
new file mode 100644
index 000000000..93dfcee0f
--- /dev/null
+++ b/drivers/spi/spi-topcliff-pch.c
@@ -0,0 +1,1754 @@
+/*
+ * SPI bus driver for the Topcliff PCH used by Intel SoCs
+ *
+ * Copyright (C) 2011 LAPIS Semiconductor Co., Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/spi/spi.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/spi/spidev.h>
+#include <linux/module.h>
+#include <linux/device.h>
+#include <linux/platform_device.h>
+
+#include <linux/dmaengine.h>
+#include <linux/pch_dma.h>
+
+/* Register offsets */
+#define PCH_SPCR 0x00 /* SPI control register */
+#define PCH_SPBRR 0x04 /* SPI baud rate register */
+#define PCH_SPSR 0x08 /* SPI status register */
+#define PCH_SPDWR 0x0C /* SPI write data register */
+#define PCH_SPDRR 0x10 /* SPI read data register */
+#define PCH_SSNXCR 0x18 /* SSN Expand Control Register */
+#define PCH_SRST 0x1C /* SPI reset register */
+#define PCH_ADDRESS_SIZE 0x20
+
+#define PCH_SPSR_TFD 0x000007C0
+#define PCH_SPSR_RFD 0x0000F800
+
+#define PCH_READABLE(x) (((x) & PCH_SPSR_RFD)>>11)
+#define PCH_WRITABLE(x) (((x) & PCH_SPSR_TFD)>>6)
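+
+/*
+ * PCH_READABLE()/PCH_WRITABLE() extract the RFD/TFD fields of SPSR,
+ * i.e. how many entries can currently be read from the RX FIFO or
+ * written to the TX FIFO of the 16-entry FIFOs.
+ */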
+
+#define PCH_RX_THOLD 7
+#define PCH_RX_THOLD_MAX 15
+
+#define PCH_TX_THOLD 2
+
+#define PCH_MAX_BAUDRATE 5000000
+#define PCH_MAX_FIFO_DEPTH 16
+
+#define STATUS_RUNNING 1
+#define STATUS_EXITING 2
+#define PCH_SLEEP_TIME 10
+
+#define SSN_LOW 0x02U
+#define SSN_HIGH 0x03U
+#define SSN_NO_CONTROL 0x00U
+#define PCH_MAX_CS 0xFF
+#define PCI_DEVICE_ID_GE_SPI 0x8816
+
+#define SPCR_SPE_BIT (1 << 0)
+#define SPCR_MSTR_BIT (1 << 1)
+#define SPCR_LSBF_BIT (1 << 4)
+#define SPCR_CPHA_BIT (1 << 5)
+#define SPCR_CPOL_BIT (1 << 6)
+#define SPCR_TFIE_BIT (1 << 8)
+#define SPCR_RFIE_BIT (1 << 9)
+#define SPCR_FIE_BIT (1 << 10)
+#define SPCR_ORIE_BIT (1 << 11)
+#define SPCR_MDFIE_BIT (1 << 12)
+#define SPCR_FICLR_BIT (1 << 24)
+#define SPSR_TFI_BIT (1 << 0)
+#define SPSR_RFI_BIT (1 << 1)
+#define SPSR_FI_BIT (1 << 2)
+#define SPSR_ORF_BIT (1 << 3)
+#define SPBRR_SIZE_BIT (1 << 10)
+
+#define PCH_ALL (SPCR_TFIE_BIT|SPCR_RFIE_BIT|SPCR_FIE_BIT|\
+ SPCR_ORIE_BIT|SPCR_MDFIE_BIT)
+
+#define SPCR_RFIC_FIELD 20
+#define SPCR_TFIC_FIELD 16
+
+#define MASK_SPBRR_SPBR_BITS ((1 << 10) - 1)
+#define MASK_RFIC_SPCR_BITS (0xf << SPCR_RFIC_FIELD)
+#define MASK_TFIC_SPCR_BITS (0xf << SPCR_TFIC_FIELD)
+
+#define PCH_CLOCK_HZ 50000000
+#define PCH_MAX_SPBR 1023
+
+/* Definition for ML7213/ML7223/ML7831 by LAPIS Semiconductor */
+#define PCI_VENDOR_ID_ROHM 0x10DB
+#define PCI_DEVICE_ID_ML7213_SPI 0x802c
+#define PCI_DEVICE_ID_ML7223_SPI 0x800F
+#define PCI_DEVICE_ID_ML7831_SPI 0x8816
+
+/*
+ * Set the number of SPI instance max
+ * Intel EG20T PCH : 1ch
+ * LAPIS Semiconductor ML7213 IOH : 2ch
+ * LAPIS Semiconductor ML7223 IOH : 1ch
+ * LAPIS Semiconductor ML7831 IOH : 1ch
+*/
+#define PCH_SPI_MAX_DEV 2
+
+#define PCH_BUF_SIZE 4096
+#define PCH_DMA_TRANS_SIZE 12
+
+static int use_dma = 1;
+
+struct pch_spi_dma_ctrl {
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx;
+ struct pch_dma_slave param_tx;
+ struct pch_dma_slave param_rx;
+ struct dma_chan *chan_tx;
+ struct dma_chan *chan_rx;
+ struct scatterlist *sg_tx_p;
+ struct scatterlist *sg_rx_p;
+ struct scatterlist sg_tx;
+ struct scatterlist sg_rx;
+ int nent;
+ void *tx_buf_virt;
+ void *rx_buf_virt;
+ dma_addr_t tx_buf_dma;
+ dma_addr_t rx_buf_dma;
+};
+/**
+ * struct pch_spi_data - Holds the SPI channel specific details
+ * @io_remap_addr: The remapped PCI base address
+ * @master: Pointer to the SPI master structure
+ * @work: Reference to work queue handler
+ * @wk: Workqueue for carrying out execution of the
+ * requests
+ * @wait: Wait queue for waking up upon receiving an
+ * interrupt.
+ * @transfer_complete: Status of SPI Transfer
+ * @bcurrent_msg_processing: Status flag for message processing
+ * @lock: Lock for protecting this structure
+ * @queue: SPI Message queue
+ * @status: Status of the SPI driver
+ * @bpw_len: Length of data to be transferred in bits per
+ * word
+ * @transfer_active: Flag showing active transfer
+ * @tx_index: Transmit data count; for bookkeeping during
+ * transfer
+ * @rx_index: Receive data count; for bookkeeping during
+ * transfer
+ * @pkt_tx_buff: Buffer for data to be transmitted
+ * @pkt_rx_buff: Buffer for received data
+ * @n_curnt_chip: The chip number that this SPI driver currently
+ * operates on
+ * @current_chip: Reference to the current chip that this SPI
+ * driver currently operates on
+ * @current_msg: The current message that this SPI driver is
+ * handling
+ * @cur_trans: The current transfer that this SPI driver is
+ * handling
+ * @board_dat: Reference to the SPI device data structure
+ * @plat_dev: platform_device structure
+ * @ch: SPI channel number
+ * @irq_reg_sts: Status of IRQ registration
+ */
+struct pch_spi_data {
+ void __iomem *io_remap_addr;
+ unsigned long io_base_addr;
+ struct spi_master *master;
+ struct work_struct work;
+ struct workqueue_struct *wk;
+ wait_queue_head_t wait;
+ u8 transfer_complete;
+ u8 bcurrent_msg_processing;
+ spinlock_t lock;
+ struct list_head queue;
+ u8 status;
+ u32 bpw_len;
+ u8 transfer_active;
+ u32 tx_index;
+ u32 rx_index;
+ u16 *pkt_tx_buff;
+ u16 *pkt_rx_buff;
+ u8 n_curnt_chip;
+ struct spi_device *current_chip;
+ struct spi_message *current_msg;
+ struct spi_transfer *cur_trans;
+ struct pch_spi_board_data *board_dat;
+ struct platform_device *plat_dev;
+ int ch;
+ struct pch_spi_dma_ctrl dma;
+ int use_dma;
+ u8 irq_reg_sts;
+ int save_total_len;
+};
+
+/**
+ * struct pch_spi_board_data - Holds the SPI device specific details
+ * @pdev: Pointer to the PCI device
+ * @suspend_sts: Status of suspend
+ * @num: The number of SPI device instance
+ */
+struct pch_spi_board_data {
+ struct pci_dev *pdev;
+ u8 suspend_sts;
+ int num;
+};
+
+struct pch_pd_dev_save {
+ int num;
+ struct platform_device *pd_save[PCH_SPI_MAX_DEV];
+ struct pch_spi_board_data *board_dat;
+};
+
+static const struct pci_device_id pch_spi_pcidev_id[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_GE_SPI), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_SPI), 2, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7223_SPI), 1, },
+ { PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7831_SPI), 1, },
+ { }
+};
+
+/**
+ * pch_spi_writereg() - Performs register writes
+ * @master: Pointer to struct spi_master.
+ * @idx: Register offset.
+ * @val: Value to be written to register.
+ */
+static inline void pch_spi_writereg(struct spi_master *master, int idx, u32 val)
+{
+ struct pch_spi_data *data = spi_master_get_devdata(master);
+ iowrite32(val, (data->io_remap_addr + idx));
+}
+
+/**
+ * pch_spi_readreg() - Performs register reads
+ * @master: Pointer to struct spi_master.
+ * @idx: Register offset.
+ */
+static inline u32 pch_spi_readreg(struct spi_master *master, int idx)
+{
+ struct pch_spi_data *data = spi_master_get_devdata(master);
+ return ioread32(data->io_remap_addr + idx);
+}
+
+static inline void pch_spi_setclr_reg(struct spi_master *master, int idx,
+ u32 set, u32 clr)
+{
+ u32 tmp = pch_spi_readreg(master, idx);
+ tmp = (tmp & ~clr) | set;
+ pch_spi_writereg(master, idx, tmp);
+}
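+
+/*
+ * Read-modify-write helper: bits in @clr are cleared, then bits in
+ * @set are set. pch_spi_set_master_mode() below, for instance, passes
+ * (SPCR_MSTR_BIT, 0) to set MSTR without disturbing the rest of SPCR.
+ */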
+
+static void pch_spi_set_master_mode(struct spi_master *master)
+{
+ pch_spi_setclr_reg(master, PCH_SPCR, SPCR_MSTR_BIT, 0);
+}
+
+/**
+ * pch_spi_clear_fifo() - Clears the Transmit and Receive FIFOs
+ * @master: Pointer to struct spi_master.
+ */
+static void pch_spi_clear_fifo(struct spi_master *master)
+{
+ pch_spi_setclr_reg(master, PCH_SPCR, SPCR_FICLR_BIT, 0);
+ pch_spi_setclr_reg(master, PCH_SPCR, 0, SPCR_FICLR_BIT);
+}
+
+static void pch_spi_handler_sub(struct pch_spi_data *data, u32 reg_spsr_val,
+ void __iomem *io_remap_addr)
+{
+ u32 n_read, tx_index, rx_index, bpw_len;
+ u16 *pkt_rx_buffer, *pkt_tx_buff;
+ int read_cnt;
+ u32 reg_spcr_val;
+ void __iomem *spsr;
+ void __iomem *spdrr;
+ void __iomem *spdwr;
+
+ spsr = io_remap_addr + PCH_SPSR;
+ iowrite32(reg_spsr_val, spsr);
+
+ if (data->transfer_active) {
+ rx_index = data->rx_index;
+ tx_index = data->tx_index;
+ bpw_len = data->bpw_len;
+ pkt_rx_buffer = data->pkt_rx_buff;
+ pkt_tx_buff = data->pkt_tx_buff;
+
+ spdrr = io_remap_addr + PCH_SPDRR;
+ spdwr = io_remap_addr + PCH_SPDWR;
+
+ n_read = PCH_READABLE(reg_spsr_val);
+
+ for (read_cnt = 0; (read_cnt < n_read); read_cnt++) {
+ pkt_rx_buffer[rx_index++] = ioread32(spdrr);
+ if (tx_index < bpw_len)
+ iowrite32(pkt_tx_buff[tx_index++], spdwr);
+ }
+
+ /* disable RFI if not needed */
+ if ((bpw_len - rx_index) <= PCH_MAX_FIFO_DEPTH) {
+ reg_spcr_val = ioread32(io_remap_addr + PCH_SPCR);
+ reg_spcr_val &= ~SPCR_RFIE_BIT; /* disable RFI */
+
+ /* reset rx threshold */
+ reg_spcr_val &= ~MASK_RFIC_SPCR_BITS;
+ reg_spcr_val |= (PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD);
+
+ iowrite32(reg_spcr_val, (io_remap_addr + PCH_SPCR));
+ }
+
+ /* update counts */
+ data->tx_index = tx_index;
+ data->rx_index = rx_index;
+
+ /* if transfer complete interrupt */
+ if (reg_spsr_val & SPSR_FI_BIT) {
+ if ((tx_index == bpw_len) && (rx_index == tx_index)) {
+ /* disable interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+ PCH_ALL);
+
+ /* transfer is completed;
+ inform pch_spi_process_messages */
+ data->transfer_complete = true;
+ data->transfer_active = false;
+ wake_up(&data->wait);
+ } else {
+ dev_vdbg(&data->master->dev,
+ "%s : Transfer is not completed",
+ __func__);
+ }
+ }
+ }
+}
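+
+/*
+ * FIFO service above: for every readable RX entry one pending TX word
+ * is pushed back, keeping both FIFOs in lockstep; once fewer than a
+ * FIFO's worth of words remain, RFI is disabled, and the transfer
+ * complete interrupt (SPSR_FI_BIT) wakes the waiter in
+ * pch_spi_set_ir().
+ */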
+
+/**
+ * pch_spi_handler() - Interrupt handler
+ * @irq: The interrupt number.
+ * @dev_id: Pointer to struct pch_spi_board_data.
+ */
+static irqreturn_t pch_spi_handler(int irq, void *dev_id)
+{
+ u32 reg_spsr_val;
+ void __iomem *spsr;
+ void __iomem *io_remap_addr;
+ irqreturn_t ret = IRQ_NONE;
+ struct pch_spi_data *data = dev_id;
+ struct pch_spi_board_data *board_dat = data->board_dat;
+
+ if (board_dat->suspend_sts) {
+ dev_dbg(&board_dat->pdev->dev,
+ "%s returning due to suspend\n", __func__);
+ return IRQ_NONE;
+ }
+
+ io_remap_addr = data->io_remap_addr;
+ spsr = io_remap_addr + PCH_SPSR;
+
+ reg_spsr_val = ioread32(spsr);
+
+ if (reg_spsr_val & SPSR_ORF_BIT) {
+ dev_err(&board_dat->pdev->dev, "%s Over run error\n", __func__);
+ if (data->current_msg->complete) {
+ data->transfer_complete = true;
+ data->current_msg->status = -EIO;
+ data->current_msg->complete(data->current_msg->context);
+ data->bcurrent_msg_processing = false;
+ data->current_msg = NULL;
+ data->cur_trans = NULL;
+ }
+ }
+
+ if (data->use_dma)
+ return IRQ_NONE;
+
+ /* Check if the interrupt is for SPI device */
+ if (reg_spsr_val & (SPSR_FI_BIT | SPSR_RFI_BIT)) {
+ pch_spi_handler_sub(data, reg_spsr_val, io_remap_addr);
+ ret = IRQ_HANDLED;
+ }
+
+ dev_dbg(&board_dat->pdev->dev, "%s EXIT return value=%d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+/**
+ * pch_spi_set_baud_rate() - Sets SPBR field in SPBRR
+ * @master: Pointer to struct spi_master.
+ * @speed_hz: Baud rate.
+ */
+static void pch_spi_set_baud_rate(struct spi_master *master, u32 speed_hz)
+{
+ u32 n_spbr = PCH_CLOCK_HZ / (speed_hz * 2);
+
+ /* if the requested rate is lower than we can reach, clamp the divider */
+ if (n_spbr > PCH_MAX_SPBR)
+ n_spbr = PCH_MAX_SPBR;
+
+ pch_spi_setclr_reg(master, PCH_SPBRR, n_spbr, MASK_SPBRR_SPBR_BITS);
+}
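+
+/*
+ * Example, assuming the resulting bit rate is PCH_CLOCK_HZ / (2 * SPBR)
+ * (the inverse of the computation above): speed_hz = 5 MHz on the
+ * 50 MHz PCH clock gives n_spbr = 50000000 / (5000000 * 2) = 5.
+ */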
+
+/**
+ * pch_spi_set_bits_per_word() - Sets SIZE field in SPBRR
+ * @master: Pointer to struct spi_master.
+ * @bits_per_word: Bits per word for SPI transfer.
+ */
+static void pch_spi_set_bits_per_word(struct spi_master *master,
+ u8 bits_per_word)
+{
+ if (bits_per_word == 8)
+ pch_spi_setclr_reg(master, PCH_SPBRR, 0, SPBRR_SIZE_BIT);
+ else
+ pch_spi_setclr_reg(master, PCH_SPBRR, SPBRR_SIZE_BIT, 0);
+}
+
+/**
+ * pch_spi_setup_transfer() - Configures the PCH SPI hardware for transfer
+ * @spi: Pointer to struct spi_device.
+ */
+static void pch_spi_setup_transfer(struct spi_device *spi)
+{
+ u32 flags = 0;
+
+ dev_dbg(&spi->dev, "%s SPBRR content =%x setting baud rate=%d\n",
+ __func__, pch_spi_readreg(spi->master, PCH_SPBRR),
+ spi->max_speed_hz);
+ pch_spi_set_baud_rate(spi->master, spi->max_speed_hz);
+
+ /* set bits per word */
+ pch_spi_set_bits_per_word(spi->master, spi->bits_per_word);
+
+ if (!(spi->mode & SPI_LSB_FIRST))
+ flags |= SPCR_LSBF_BIT;
+ if (spi->mode & SPI_CPOL)
+ flags |= SPCR_CPOL_BIT;
+ if (spi->mode & SPI_CPHA)
+ flags |= SPCR_CPHA_BIT;
+ pch_spi_setclr_reg(spi->master, PCH_SPCR, flags,
+ (SPCR_LSBF_BIT | SPCR_CPOL_BIT | SPCR_CPHA_BIT));
+
+ /* Clear the FIFO by toggling FICLR to 1 and back to 0 */
+ pch_spi_clear_fifo(spi->master);
+}
+
+/**
+ * pch_spi_reset() - Clears SPI registers
+ * @master: Pointer to struct spi_master.
+ */
+static void pch_spi_reset(struct spi_master *master)
+{
+ /* write 1 to reset SPI */
+ pch_spi_writereg(master, PCH_SRST, 0x1);
+
+ /* clear reset */
+ pch_spi_writereg(master, PCH_SRST, 0x0);
+}
+
+static int pch_spi_transfer(struct spi_device *pspi, struct spi_message *pmsg)
+{
+
+ struct spi_transfer *transfer;
+ struct pch_spi_data *data = spi_master_get_devdata(pspi->master);
+ int retval;
+ unsigned long flags;
+
+ spin_lock_irqsave(&data->lock, flags);
+ /* validate Tx/Rx buffers and Transfer length */
+ list_for_each_entry(transfer, &pmsg->transfers, transfer_list) {
+ if (!transfer->tx_buf && !transfer->rx_buf) {
+ dev_err(&pspi->dev,
+ "%s Tx and Rx buffer NULL\n", __func__);
+ retval = -EINVAL;
+ goto err_return_spinlock;
+ }
+
+ if (!transfer->len) {
+ dev_err(&pspi->dev, "%s Transfer length invalid\n",
+ __func__);
+ retval = -EINVAL;
+ goto err_return_spinlock;
+ }
+
+ dev_dbg(&pspi->dev,
+ "%s Tx/Rx buffer valid. Transfer length valid\n",
+ __func__);
+ }
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* We won't process any messages if we have been asked to terminate */
+ if (data->status == STATUS_EXITING) {
+ dev_err(&pspi->dev, "%s status = STATUS_EXITING.\n", __func__);
+ retval = -ESHUTDOWN;
+ goto err_out;
+ }
+
+ /* If suspended, return -EINVAL */
+ if (data->board_dat->suspend_sts) {
+ dev_err(&pspi->dev, "%s suspend; returning EINVAL\n", __func__);
+ retval = -EINVAL;
+ goto err_out;
+ }
+
+ /* set status of message */
+ pmsg->actual_length = 0;
+ dev_dbg(&pspi->dev, "%s - pmsg->status =%d\n", __func__, pmsg->status);
+
+ pmsg->status = -EINPROGRESS;
+ spin_lock_irqsave(&data->lock, flags);
+ /* add message to queue */
+ list_add_tail(&pmsg->queue, &data->queue);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ dev_dbg(&pspi->dev, "%s - Invoked list_add_tail\n", __func__);
+
+ /* schedule work queue to run */
+ queue_work(data->wk, &data->work);
+ dev_dbg(&pspi->dev, "%s - Invoked queue work\n", __func__);
+
+ retval = 0;
+
+err_out:
+ dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
+ return retval;
+err_return_spinlock:
+ dev_dbg(&pspi->dev, "%s RETURN=%d\n", __func__, retval);
+ spin_unlock_irqrestore(&data->lock, flags);
+ return retval;
+}
+
+static inline void pch_spi_select_chip(struct pch_spi_data *data,
+ struct spi_device *pspi)
+{
+ if (data->current_chip != NULL) {
+ if (pspi->chip_select != data->n_curnt_chip) {
+ dev_dbg(&pspi->dev, "%s : different slave\n", __func__);
+ data->current_chip = NULL;
+ }
+ }
+
+ data->current_chip = pspi;
+
+ data->n_curnt_chip = data->current_chip->chip_select;
+
+ dev_dbg(&pspi->dev, "%s :Invoking pch_spi_setup_transfer\n", __func__);
+ pch_spi_setup_transfer(pspi);
+}
+
+static void pch_spi_set_tx(struct pch_spi_data *data, int *bpw)
+{
+ int size;
+ u32 n_writes;
+ int j;
+ struct spi_message *pmsg, *tmp;
+ const u8 *tx_buf;
+ const u16 *tx_sbuf;
+
+ /* set baud rate if needed */
+ if (data->cur_trans->speed_hz) {
+ dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
+ pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
+ }
+
+ /* set bits per word if needed */
+ if (data->cur_trans->bits_per_word &&
+ (data->current_msg->spi->bits_per_word != data->cur_trans->bits_per_word)) {
+ dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
+ pch_spi_set_bits_per_word(data->master,
+ data->cur_trans->bits_per_word);
+ *bpw = data->cur_trans->bits_per_word;
+ } else {
+ *bpw = data->current_msg->spi->bits_per_word;
+ }
+
+ /* reset Tx/Rx index */
+ data->tx_index = 0;
+ data->rx_index = 0;
+
+ data->bpw_len = data->cur_trans->len / (*bpw / 8);
+
+ /* find alloc size */
+ size = data->cur_trans->len * sizeof(*data->pkt_tx_buff);
+
+ /* allocate memory for pkt_tx_buff & pkt_rx_buffer */
+ data->pkt_tx_buff = kzalloc(size, GFP_KERNEL);
+ if (data->pkt_tx_buff != NULL) {
+ data->pkt_rx_buff = kzalloc(size, GFP_KERNEL);
+ if (!data->pkt_rx_buff)
+ kfree(data->pkt_tx_buff);
+ }
+
+ if (!data->pkt_rx_buff) {
+ /* flush queue and set status of all transfers to -ENOMEM */
+ dev_err(&data->master->dev, "%s :kzalloc failed\n", __func__);
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
+ pmsg->status = -ENOMEM;
+
+ if (pmsg->complete)
+ pmsg->complete(pmsg->context);
+
+ /* delete from queue */
+ list_del_init(&pmsg->queue);
+ }
+ return;
+ }
+
+ /* copy Tx Data */
+ if (data->cur_trans->tx_buf != NULL) {
+ if (*bpw == 8) {
+ tx_buf = data->cur_trans->tx_buf;
+ for (j = 0; j < data->bpw_len; j++)
+ data->pkt_tx_buff[j] = *tx_buf++;
+ } else {
+ tx_sbuf = data->cur_trans->tx_buf;
+ for (j = 0; j < data->bpw_len; j++)
+ data->pkt_tx_buff[j] = *tx_sbuf++;
+ }
+ }
+
+ /* if bpw_len exceeds the FIFO depth, write PCH_MAX_FIFO_DEPTH words, else bpw_len words */
+ n_writes = data->bpw_len;
+ if (n_writes > PCH_MAX_FIFO_DEPTH)
+ n_writes = PCH_MAX_FIFO_DEPTH;
+
+ dev_dbg(&data->master->dev, "\n%s:Pulling down SSN low - writing "
+ "0x2 to SSNXCR\n", __func__);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+
+ for (j = 0; j < n_writes; j++)
+ pch_spi_writereg(data->master, PCH_SPDWR, data->pkt_tx_buff[j]);
+
+ /* update tx_index */
+ data->tx_index = j;
+
+ /* reset transfer complete flag */
+ data->transfer_complete = false;
+ data->transfer_active = true;
+}
+
+static void pch_spi_nomore_transfer(struct pch_spi_data *data)
+{
+ struct spi_message *pmsg, *tmp;
+ dev_dbg(&data->master->dev, "%s called\n", __func__);
+	/* Invoke the complete callback
+	 * (to the SPI core, indicating end of transfer) */
+ data->current_msg->status = 0;
+
+ if (data->current_msg->complete) {
+ dev_dbg(&data->master->dev,
+ "%s:Invoking callback of SPI core\n", __func__);
+ data->current_msg->complete(data->current_msg->context);
+ }
+
+ /* update status in global variable */
+ data->bcurrent_msg_processing = false;
+
+ dev_dbg(&data->master->dev,
+ "%s:data->bcurrent_msg_processing = false\n", __func__);
+
+ data->current_msg = NULL;
+ data->cur_trans = NULL;
+
+	/* keep going if the queue still has messages and neither suspend
+	 * nor driver exit is in progress (list_empty() is nonzero when
+	 * the list is empty) */
+ if ((list_empty(&data->queue) == 0) &&
+ (!data->board_dat->suspend_sts) &&
+ (data->status != STATUS_EXITING)) {
+		/* We have some more work to do (either there are more
+		 * transfer requests in the current message or there are
+		 * more messages)
+		 */
+ dev_dbg(&data->master->dev, "%s:Invoke queue_work\n", __func__);
+ queue_work(data->wk, &data->work);
+ } else if (data->board_dat->suspend_sts ||
+ data->status == STATUS_EXITING) {
+ dev_dbg(&data->master->dev,
+ "%s suspend/remove initiated, flushing queue\n",
+ __func__);
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
+ pmsg->status = -EIO;
+
+ if (pmsg->complete)
+ pmsg->complete(pmsg->context);
+
+ /* delete from queue */
+ list_del_init(&pmsg->queue);
+ }
+ }
+}
+
+static void pch_spi_set_ir(struct pch_spi_data *data)
+{
+ /* enable interrupts, set threshold, enable SPI */
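+	/*
+	 * Use the lower PCH_RX_THOLD when the ISR must refill the FIFO
+	 * mid-transfer; otherwise the maximum threshold suffices since
+	 * the whole transfer fits in the FIFO.
+	 */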
+ if ((data->bpw_len) > PCH_MAX_FIFO_DEPTH)
+ /* set receive threshold to PCH_RX_THOLD */
+ pch_spi_setclr_reg(data->master, PCH_SPCR,
+ PCH_RX_THOLD << SPCR_RFIC_FIELD |
+ SPCR_FIE_BIT | SPCR_RFIE_BIT |
+ SPCR_ORIE_BIT | SPCR_SPE_BIT,
+ MASK_RFIC_SPCR_BITS | PCH_ALL);
+ else
+ /* set receive threshold to maximum */
+ pch_spi_setclr_reg(data->master, PCH_SPCR,
+ PCH_RX_THOLD_MAX << SPCR_RFIC_FIELD |
+ SPCR_FIE_BIT | SPCR_ORIE_BIT |
+ SPCR_SPE_BIT,
+ MASK_RFIC_SPCR_BITS | PCH_ALL);
+
+	/* Wait until the transfer completes; go to sleep after
+	 * initiating the transfer. */
+ dev_dbg(&data->master->dev,
+ "%s:waiting for transfer to get over\n", __func__);
+
+ wait_event_interruptible(data->wait, data->transfer_complete);
+
+ /* clear all interrupts */
+ pch_spi_writereg(data->master, PCH_SPSR,
+ pch_spi_readreg(data->master, PCH_SPSR));
+ /* Disable interrupts and SPI transfer */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL | SPCR_SPE_BIT);
+ /* clear FIFO */
+ pch_spi_clear_fifo(data->master);
+}
+
+static void pch_spi_copy_rx_data(struct pch_spi_data *data, int bpw)
+{
+ int j;
+ u8 *rx_buf;
+ u16 *rx_sbuf;
+
+ /* copy Rx Data */
+ if (!data->cur_trans->rx_buf)
+ return;
+
+ if (bpw == 8) {
+ rx_buf = data->cur_trans->rx_buf;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_buf++ = data->pkt_rx_buff[j] & 0xFF;
+ } else {
+ rx_sbuf = data->cur_trans->rx_buf;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_sbuf++ = data->pkt_rx_buff[j];
+ }
+}
+
+static void pch_spi_copy_rx_data_for_dma(struct pch_spi_data *data, int bpw)
+{
+ int j;
+ u8 *rx_buf;
+ u16 *rx_sbuf;
+ const u8 *rx_dma_buf;
+ const u16 *rx_dma_sbuf;
+
+ /* copy Rx Data */
+ if (!data->cur_trans->rx_buf)
+ return;
+
+ if (bpw == 8) {
+ rx_buf = data->cur_trans->rx_buf;
+ rx_dma_buf = data->dma.rx_buf_virt;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_buf++ = *rx_dma_buf++ & 0xFF;
+ data->cur_trans->rx_buf = rx_buf;
+ } else {
+ rx_sbuf = data->cur_trans->rx_buf;
+ rx_dma_sbuf = data->dma.rx_buf_virt;
+ for (j = 0; j < data->bpw_len; j++)
+ *rx_sbuf++ = *rx_dma_sbuf++;
+ data->cur_trans->rx_buf = rx_sbuf;
+ }
+}
+
+static int pch_spi_start_transfer(struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+ unsigned long flags;
+ int rtn;
+
+ dma = &data->dma;
+
+ spin_lock_irqsave(&data->lock, flags);
+
+	/* disable interrupts and enable the SPI transfer */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, SPCR_SPE_BIT, PCH_ALL);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+	/* Wait until the transfer completes; go to sleep after
+	 * initiating the transfer. */
+ dev_dbg(&data->master->dev,
+ "%s:waiting for transfer to get over\n", __func__);
+ rtn = wait_event_interruptible_timeout(data->wait,
+ data->transfer_complete,
+ msecs_to_jiffies(2 * HZ));
+ if (!rtn)
+ dev_err(&data->master->dev,
+ "%s wait-event timeout\n", __func__);
+
+ dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,
+ DMA_FROM_DEVICE);
+
+ dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,
+ DMA_FROM_DEVICE);
+ memset(data->dma.tx_buf_virt, 0, PAGE_SIZE);
+
+ async_tx_ack(dma->desc_rx);
+ async_tx_ack(dma->desc_tx);
+ kfree(dma->sg_tx_p);
+ kfree(dma->sg_rx_p);
+
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* clear fifo threshold, disable interrupts, disable SPI transfer */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0,
+ MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS | PCH_ALL |
+ SPCR_SPE_BIT);
+ /* clear all interrupts */
+ pch_spi_writereg(data->master, PCH_SPSR,
+ pch_spi_readreg(data->master, PCH_SPSR));
+ /* clear FIFO */
+ pch_spi_clear_fifo(data->master);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ return rtn;
+}
+
+static void pch_dma_rx_complete(void *arg)
+{
+ struct pch_spi_data *data = arg;
+
+	/* transfer is completed; wake up the waiter in pch_spi_start_transfer */
+ data->transfer_complete = true;
+ wake_up_interruptible(&data->wait);
+}
+
+static bool pch_spi_filter(struct dma_chan *chan, void *slave)
+{
+ struct pch_dma_slave *param = slave;
+
+ if ((chan->chan_id == param->chan_id) &&
+ (param->dma_dev == chan->device->dev)) {
+ chan->private = param;
+ return true;
+ } else {
+ return false;
+ }
+}
+
+static void pch_spi_request_dma(struct pch_spi_data *data, int bpw)
+{
+ dma_cap_mask_t mask;
+ struct dma_chan *chan;
+ struct pci_dev *dma_dev;
+ struct pch_dma_slave *param;
+ struct pch_spi_dma_ctrl *dma;
+ unsigned int width;
+
+ if (bpw == 8)
+ width = PCH_DMA_WIDTH_1_BYTE;
+ else
+ width = PCH_DMA_WIDTH_2_BYTES;
+
+ dma = &data->dma;
+ dma_cap_zero(mask);
+ dma_cap_set(DMA_SLAVE, mask);
+
+ /* Get DMA's dev information */
+ dma_dev = pci_get_slot(data->board_dat->pdev->bus,
+ PCI_DEVFN(PCI_SLOT(data->board_dat->pdev->devfn), 0));
+
+ /* Set Tx DMA */
+ param = &dma->param_tx;
+ param->dma_dev = &dma_dev->dev;
+	param->chan_id = data->ch * 2; /* Tx = 0, 2 */
+ param->tx_reg = data->io_base_addr + PCH_SPDWR;
+ param->width = width;
+ chan = dma_request_channel(mask, pch_spi_filter, param);
+ if (!chan) {
+ dev_err(&data->master->dev,
+ "ERROR: dma_request_channel FAILS(Tx)\n");
+ data->use_dma = 0;
+ return;
+ }
+ dma->chan_tx = chan;
+
+ /* Set Rx DMA */
+ param = &dma->param_rx;
+ param->dma_dev = &dma_dev->dev;
+	param->chan_id = data->ch * 2 + 1; /* Rx = Tx + 1 */
+ param->rx_reg = data->io_base_addr + PCH_SPDRR;
+ param->width = width;
+ chan = dma_request_channel(mask, pch_spi_filter, param);
+ if (!chan) {
+ dev_err(&data->master->dev,
+ "ERROR: dma_request_channel FAILS(Rx)\n");
+ dma_release_channel(dma->chan_tx);
+ dma->chan_tx = NULL;
+ data->use_dma = 0;
+ return;
+ }
+ dma->chan_rx = chan;
+}
+
+static void pch_spi_release_dma(struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+ if (dma->chan_tx) {
+ dma_release_channel(dma->chan_tx);
+ dma->chan_tx = NULL;
+ }
+ if (dma->chan_rx) {
+ dma_release_channel(dma->chan_rx);
+ dma->chan_rx = NULL;
+ }
+}
+
+static void pch_spi_handle_dma(struct pch_spi_data *data, int *bpw)
+{
+ const u8 *tx_buf;
+ const u16 *tx_sbuf;
+ u8 *tx_dma_buf;
+ u16 *tx_dma_sbuf;
+ struct scatterlist *sg;
+ struct dma_async_tx_descriptor *desc_tx;
+ struct dma_async_tx_descriptor *desc_rx;
+ int num;
+ int i;
+ int size;
+ int rem;
+ int head;
+ unsigned long flags;
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+
+ /* set baud rate if needed */
+ if (data->cur_trans->speed_hz) {
+ dev_dbg(&data->master->dev, "%s:setting baud rate\n", __func__);
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_set_baud_rate(data->master, data->cur_trans->speed_hz);
+ spin_unlock_irqrestore(&data->lock, flags);
+ }
+
+ /* set bits per word if needed */
+ if (data->cur_trans->bits_per_word &&
+ (data->current_msg->spi->bits_per_word !=
+ data->cur_trans->bits_per_word)) {
+ dev_dbg(&data->master->dev, "%s:set bits per word\n", __func__);
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_set_bits_per_word(data->master,
+ data->cur_trans->bits_per_word);
+ spin_unlock_irqrestore(&data->lock, flags);
+ *bpw = data->cur_trans->bits_per_word;
+ } else {
+ *bpw = data->current_msg->spi->bits_per_word;
+ }
+ data->bpw_len = data->cur_trans->len / (*bpw / 8);
+
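+	/*
+	 * Clamp this pass to the PCH_BUF_SIZE DMA bounce buffer; the
+	 * caller loops, so the remaining cur_trans->len is sent in
+	 * later passes.
+	 */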
+ if (data->bpw_len > PCH_BUF_SIZE) {
+ data->bpw_len = PCH_BUF_SIZE;
+ data->cur_trans->len -= PCH_BUF_SIZE;
+ }
+
+ /* copy Tx Data */
+ if (data->cur_trans->tx_buf != NULL) {
+ if (*bpw == 8) {
+ tx_buf = data->cur_trans->tx_buf;
+ tx_dma_buf = dma->tx_buf_virt;
+ for (i = 0; i < data->bpw_len; i++)
+ *tx_dma_buf++ = *tx_buf++;
+ } else {
+ tx_sbuf = data->cur_trans->tx_buf;
+ tx_dma_sbuf = dma->tx_buf_virt;
+ for (i = 0; i < data->bpw_len; i++)
+ *tx_dma_sbuf++ = *tx_sbuf++;
+ }
+ }
+
+	/* Calculate Rx parameters for the DMA transfer */
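+	/*
+	 * Split the Rx side into 'num' scatterlist chunks of 'size'
+	 * words each; 'rem' is the length of the odd-sized chunk.
+	 */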
+ if (data->bpw_len > PCH_DMA_TRANS_SIZE) {
+ if (data->bpw_len % PCH_DMA_TRANS_SIZE) {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
+ rem = data->bpw_len % PCH_DMA_TRANS_SIZE;
+ } else {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+ rem = PCH_DMA_TRANS_SIZE;
+ }
+ size = PCH_DMA_TRANS_SIZE;
+ } else {
+ num = 1;
+ size = data->bpw_len;
+ rem = data->bpw_len;
+ }
+ dev_dbg(&data->master->dev, "%s num=%d size=%d rem=%d\n",
+ __func__, num, size, rem);
+ spin_lock_irqsave(&data->lock, flags);
+
+ /* set receive fifo threshold and transmit fifo threshold */
+ pch_spi_setclr_reg(data->master, PCH_SPCR,
+ ((size - 1) << SPCR_RFIC_FIELD) |
+ (PCH_TX_THOLD << SPCR_TFIC_FIELD),
+ MASK_RFIC_SPCR_BITS | MASK_TFIC_SPCR_BITS);
+
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* RX */
+	dma->sg_rx_p = kzalloc(sizeof(struct scatterlist) * num, GFP_ATOMIC);
+ sg_init_table(dma->sg_rx_p, num); /* Initialize SG table */
+ /* offset, length setting */
+ sg = dma->sg_rx_p;
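+	/*
+	 * Note the ordering below: the short 'rem' chunk is placed at
+	 * index num - 2 and the final full-sized chunk at num - 1.
+	 */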
+ for (i = 0; i < num; i++, sg++) {
+ if (i == (num - 2)) {
+ sg->offset = size * i;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), rem,
+ sg->offset);
+ sg_dma_len(sg) = rem;
+ } else if (i == (num - 1)) {
+ sg->offset = size * (i - 1) + rem;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
+ } else {
+ sg->offset = size * i;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->rx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
+ }
+ sg_dma_address(sg) = dma->rx_buf_dma + sg->offset;
+ }
+ sg = dma->sg_rx_p;
+ desc_rx = dmaengine_prep_slave_sg(dma->chan_rx, sg,
+ num, DMA_DEV_TO_MEM,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_rx) {
+ dev_err(&data->master->dev,
+ "%s:dmaengine_prep_slave_sg Failed\n", __func__);
+ return;
+ }
+ dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_FROM_DEVICE);
+ desc_rx->callback = pch_dma_rx_complete;
+ desc_rx->callback_param = data;
+ dma->nent = num;
+ dma->desc_rx = desc_rx;
+
+	/* Calculate Tx parameters for the DMA transfer */
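+	/*
+	 * 'head' enlarges the first Tx chunk so the initial burst fills
+	 * the FIFO to PCH_MAX_FIFO_DEPTH before steady-state DMA of
+	 * PCH_DMA_TRANS_SIZE words begins.
+	 */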
+ if (data->bpw_len > PCH_MAX_FIFO_DEPTH) {
+ head = PCH_MAX_FIFO_DEPTH - PCH_DMA_TRANS_SIZE;
+ if (data->bpw_len % PCH_DMA_TRANS_SIZE > 4) {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE + 1;
+ rem = data->bpw_len % PCH_DMA_TRANS_SIZE - head;
+ } else {
+ num = data->bpw_len / PCH_DMA_TRANS_SIZE;
+ rem = data->bpw_len % PCH_DMA_TRANS_SIZE +
+ PCH_DMA_TRANS_SIZE - head;
+ }
+ size = PCH_DMA_TRANS_SIZE;
+ } else {
+ num = 1;
+ size = data->bpw_len;
+ rem = data->bpw_len;
+ head = 0;
+ }
+
+	dma->sg_tx_p = kzalloc(sizeof(struct scatterlist) * num, GFP_ATOMIC);
+ sg_init_table(dma->sg_tx_p, num); /* Initialize SG table */
+ /* offset, length setting */
+ sg = dma->sg_tx_p;
+ for (i = 0; i < num; i++, sg++) {
+ if (i == 0) {
+ sg->offset = 0;
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size + head,
+ sg->offset);
+ sg_dma_len(sg) = size + head;
+ } else if (i == (num - 1)) {
+ sg->offset = head + size * i;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), rem,
+ sg->offset);
+ sg_dma_len(sg) = rem;
+ } else {
+ sg->offset = head + size * i;
+ sg->offset = sg->offset * (*bpw / 8);
+ sg_set_page(sg, virt_to_page(dma->tx_buf_virt), size,
+ sg->offset);
+ sg_dma_len(sg) = size;
+ }
+ sg_dma_address(sg) = dma->tx_buf_dma + sg->offset;
+ }
+ sg = dma->sg_tx_p;
+ desc_tx = dmaengine_prep_slave_sg(dma->chan_tx,
+ sg, num, DMA_MEM_TO_DEV,
+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
+ if (!desc_tx) {
+ dev_err(&data->master->dev,
+ "%s:dmaengine_prep_slave_sg Failed\n", __func__);
+ return;
+ }
+ dma_sync_sg_for_device(&data->master->dev, sg, num, DMA_TO_DEVICE);
+ desc_tx->callback = NULL;
+ desc_tx->callback_param = data;
+ dma->nent = num;
+ dma->desc_tx = desc_tx;
+
+ dev_dbg(&data->master->dev, "%s:Pulling down SSN low - writing 0x2 to SSNXCR\n", __func__);
+
+ spin_lock_irqsave(&data->lock, flags);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_LOW);
+ desc_rx->tx_submit(desc_rx);
+ desc_tx->tx_submit(desc_tx);
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ /* reset transfer complete flag */
+ data->transfer_complete = false;
+}
+
+static void pch_spi_process_messages(struct work_struct *pwork)
+{
+ struct spi_message *pmsg, *tmp;
+ struct pch_spi_data *data;
+ int bpw;
+
+ data = container_of(pwork, struct pch_spi_data, work);
+ dev_dbg(&data->master->dev, "%s data initialized\n", __func__);
+
+ spin_lock(&data->lock);
+	/* check if suspend has been initiated; if yes, flush the queue */
+ if (data->board_dat->suspend_sts || (data->status == STATUS_EXITING)) {
+ dev_dbg(&data->master->dev,
+ "%s suspend/remove initiated, flushing queue\n", __func__);
+ list_for_each_entry_safe(pmsg, tmp, data->queue.next, queue) {
+ pmsg->status = -EIO;
+
+ if (pmsg->complete) {
+ spin_unlock(&data->lock);
+ pmsg->complete(pmsg->context);
+ spin_lock(&data->lock);
+ }
+
+ /* delete from queue */
+ list_del_init(&pmsg->queue);
+ }
+
+ spin_unlock(&data->lock);
+ return;
+ }
+
+ data->bcurrent_msg_processing = true;
+ dev_dbg(&data->master->dev,
+ "%s Set data->bcurrent_msg_processing= true\n", __func__);
+
+ /* Get the message from the queue and delete it from there. */
+ data->current_msg = list_entry(data->queue.next, struct spi_message,
+ queue);
+
+ list_del_init(&data->current_msg->queue);
+
+ data->current_msg->status = 0;
+
+ pch_spi_select_chip(data, data->current_msg->spi);
+
+ spin_unlock(&data->lock);
+
+ if (data->use_dma)
+ pch_spi_request_dma(data,
+ data->current_msg->spi->bits_per_word);
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_NO_CONTROL);
+ do {
+ int cnt;
+ /* If we are already processing a message get the next
+ transfer structure from the message otherwise retrieve
+ the 1st transfer request from the message. */
+ spin_lock(&data->lock);
+ if (data->cur_trans == NULL) {
+ data->cur_trans =
+ list_entry(data->current_msg->transfers.next,
+ struct spi_transfer, transfer_list);
+			dev_dbg(&data->master->dev,
+				"%s :Getting 1st transfer message\n",
+				__func__);
+ } else {
+ data->cur_trans =
+ list_entry(data->cur_trans->transfer_list.next,
+ struct spi_transfer, transfer_list);
+			dev_dbg(&data->master->dev,
+				"%s :Getting next transfer message\n",
+				__func__);
+ }
+ spin_unlock(&data->lock);
+
+ if (!data->cur_trans->len)
+ goto out;
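+		/* number of PCH_BUF_SIZE chunks this transfer needs (rounded up) */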
+ cnt = (data->cur_trans->len - 1) / PCH_BUF_SIZE + 1;
+ data->save_total_len = data->cur_trans->len;
+ if (data->use_dma) {
+ int i;
+ char *save_rx_buf = data->cur_trans->rx_buf;
+			for (i = 0; i < cnt; i++) {
+ pch_spi_handle_dma(data, &bpw);
+ if (!pch_spi_start_transfer(data)) {
+ data->transfer_complete = true;
+ data->current_msg->status = -EIO;
+ data->current_msg->complete
+ (data->current_msg->context);
+ data->bcurrent_msg_processing = false;
+ data->current_msg = NULL;
+ data->cur_trans = NULL;
+ goto out;
+ }
+ pch_spi_copy_rx_data_for_dma(data, bpw);
+ }
+ data->cur_trans->rx_buf = save_rx_buf;
+ } else {
+ pch_spi_set_tx(data, &bpw);
+ pch_spi_set_ir(data);
+ pch_spi_copy_rx_data(data, bpw);
+ kfree(data->pkt_rx_buff);
+ data->pkt_rx_buff = NULL;
+ kfree(data->pkt_tx_buff);
+ data->pkt_tx_buff = NULL;
+ }
+		/* restore length and add it to the message's actual byte count */
+ data->cur_trans->len = data->save_total_len;
+ data->current_msg->actual_length += data->cur_trans->len;
+
+ dev_dbg(&data->master->dev,
+ "%s:data->current_msg->actual_length=%d\n",
+ __func__, data->current_msg->actual_length);
+
+ /* check for delay */
+ if (data->cur_trans->delay_usecs) {
+			dev_dbg(&data->master->dev,
+				"%s:delay in usec=%d\n", __func__,
+				data->cur_trans->delay_usecs);
+ udelay(data->cur_trans->delay_usecs);
+ }
+
+ spin_lock(&data->lock);
+
+ /* No more transfer in this message. */
+ if ((data->cur_trans->transfer_list.next) ==
+ &(data->current_msg->transfers)) {
+ pch_spi_nomore_transfer(data);
+ }
+
+ spin_unlock(&data->lock);
+
+ } while (data->cur_trans != NULL);
+
+out:
+ pch_spi_writereg(data->master, PCH_SSNXCR, SSN_HIGH);
+ if (data->use_dma)
+ pch_spi_release_dma(data);
+}
+
+static void pch_spi_free_resources(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
+{
+ dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
+
+ /* free workqueue */
+ if (data->wk != NULL) {
+ destroy_workqueue(data->wk);
+ data->wk = NULL;
+ dev_dbg(&board_dat->pdev->dev,
+ "%s destroy_workqueue invoked successfully\n",
+ __func__);
+ }
+}
+
+static int pch_spi_get_resources(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
+{
+ int retval = 0;
+
+ dev_dbg(&board_dat->pdev->dev, "%s ENTRY\n", __func__);
+
+ /* create workqueue */
+ data->wk = create_singlethread_workqueue(KBUILD_MODNAME);
+ if (!data->wk) {
+ dev_err(&board_dat->pdev->dev,
+			"%s create_singlethread_workqueue failed\n", __func__);
+ retval = -EBUSY;
+ goto err_return;
+ }
+
+ /* reset PCH SPI h/w */
+ pch_spi_reset(data->master);
+ dev_dbg(&board_dat->pdev->dev,
+ "%s pch_spi_reset invoked successfully\n", __func__);
+
+ dev_dbg(&board_dat->pdev->dev, "%s data->irq_reg_sts=true\n", __func__);
+
+err_return:
+ if (retval != 0) {
+ dev_err(&board_dat->pdev->dev,
+ "%s FAIL:invoking pch_spi_free_resources\n", __func__);
+ pch_spi_free_resources(board_dat, data);
+ }
+
+ dev_dbg(&board_dat->pdev->dev, "%s Return=%d\n", __func__, retval);
+
+ return retval;
+}
+
+static void pch_free_dma_buf(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+ if (dma->tx_buf_dma)
+ dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
+ dma->tx_buf_virt, dma->tx_buf_dma);
+ if (dma->rx_buf_dma)
+ dma_free_coherent(&board_dat->pdev->dev, PCH_BUF_SIZE,
+ dma->rx_buf_virt, dma->rx_buf_dma);
+}
+
+static void pch_alloc_dma_buf(struct pch_spi_board_data *board_dat,
+ struct pch_spi_data *data)
+{
+ struct pch_spi_dma_ctrl *dma;
+
+ dma = &data->dma;
+ /* Get Consistent memory for Tx DMA */
+ dma->tx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+ PCH_BUF_SIZE, &dma->tx_buf_dma, GFP_KERNEL);
+ /* Get Consistent memory for Rx DMA */
+ dma->rx_buf_virt = dma_alloc_coherent(&board_dat->pdev->dev,
+ PCH_BUF_SIZE, &dma->rx_buf_dma, GFP_KERNEL);
+}
+
+static int pch_spi_pd_probe(struct platform_device *plat_dev)
+{
+ int ret;
+ struct spi_master *master;
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
+ struct pch_spi_data *data;
+
+ dev_dbg(&plat_dev->dev, "%s:debug\n", __func__);
+
+ master = spi_alloc_master(&board_dat->pdev->dev,
+ sizeof(struct pch_spi_data));
+ if (!master) {
+ dev_err(&plat_dev->dev, "spi_alloc_master[%d] failed.\n",
+ plat_dev->id);
+ return -ENOMEM;
+ }
+
+ data = spi_master_get_devdata(master);
+ data->master = master;
+
+ platform_set_drvdata(plat_dev, data);
+
+	/* base address + address offset */
+ data->io_base_addr = pci_resource_start(board_dat->pdev, 1) +
+ PCH_ADDRESS_SIZE * plat_dev->id;
+ data->io_remap_addr = pci_iomap(board_dat->pdev, 1, 0);
+ if (!data->io_remap_addr) {
+ dev_err(&plat_dev->dev, "%s pci_iomap failed\n", __func__);
+ ret = -ENOMEM;
+ goto err_pci_iomap;
+ }
+ data->io_remap_addr += PCH_ADDRESS_SIZE * plat_dev->id;
+
+ dev_dbg(&plat_dev->dev, "[ch%d] remap_addr=%p\n",
+ plat_dev->id, data->io_remap_addr);
+
+ /* initialize members of SPI master */
+ master->num_chipselect = PCH_MAX_CS;
+ master->transfer = pch_spi_transfer;
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST;
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+ master->max_speed_hz = PCH_MAX_BAUDRATE;
+
+ data->board_dat = board_dat;
+ data->plat_dev = plat_dev;
+ data->n_curnt_chip = 255;
+ data->status = STATUS_RUNNING;
+ data->ch = plat_dev->id;
+ data->use_dma = use_dma;
+
+ INIT_LIST_HEAD(&data->queue);
+ spin_lock_init(&data->lock);
+ INIT_WORK(&data->work, pch_spi_process_messages);
+ init_waitqueue_head(&data->wait);
+
+ ret = pch_spi_get_resources(board_dat, data);
+ if (ret) {
+ dev_err(&plat_dev->dev, "%s fail(retval=%d)\n", __func__, ret);
+ goto err_spi_get_resources;
+ }
+
+ ret = request_irq(board_dat->pdev->irq, pch_spi_handler,
+ IRQF_SHARED, KBUILD_MODNAME, data);
+ if (ret) {
+ dev_err(&plat_dev->dev,
+ "%s request_irq failed\n", __func__);
+ goto err_request_irq;
+ }
+ data->irq_reg_sts = true;
+
+ pch_spi_set_master_mode(master);
+
+ if (use_dma) {
+ dev_info(&plat_dev->dev, "Use DMA for data transfers\n");
+ pch_alloc_dma_buf(board_dat, data);
+ }
+
+ ret = spi_register_master(master);
+ if (ret != 0) {
+ dev_err(&plat_dev->dev,
+ "%s spi_register_master FAILED\n", __func__);
+ goto err_spi_register_master;
+ }
+
+ return 0;
+
+err_spi_register_master:
+ pch_free_dma_buf(board_dat, data);
+ free_irq(board_dat->pdev->irq, data);
+err_request_irq:
+ pch_spi_free_resources(board_dat, data);
+err_spi_get_resources:
+ pci_iounmap(board_dat->pdev, data->io_remap_addr);
+err_pci_iomap:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int pch_spi_pd_remove(struct platform_device *plat_dev)
+{
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&plat_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(plat_dev);
+ int count;
+ unsigned long flags;
+
+ dev_dbg(&plat_dev->dev, "%s:[ch%d] irq=%d\n",
+ __func__, plat_dev->id, board_dat->pdev->irq);
+
+ if (use_dma)
+ pch_free_dma_buf(board_dat, data);
+
+	/* wait briefly for pending messages to drain; if the queue never
+	 * empties, give up and unload anyway */
+ count = 500;
+ spin_lock_irqsave(&data->lock, flags);
+ data->status = STATUS_EXITING;
+ while ((list_empty(&data->queue) == 0) && --count) {
+ dev_dbg(&board_dat->pdev->dev, "%s :queue not empty\n",
+ __func__);
+ spin_unlock_irqrestore(&data->lock, flags);
+ msleep(PCH_SLEEP_TIME);
+ spin_lock_irqsave(&data->lock, flags);
+ }
+ spin_unlock_irqrestore(&data->lock, flags);
+
+ pch_spi_free_resources(board_dat, data);
+ /* disable interrupts & free IRQ */
+ if (data->irq_reg_sts) {
+ /* disable interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ data->irq_reg_sts = false;
+ free_irq(board_dat->pdev->irq, data);
+ }
+
+ pci_iounmap(board_dat->pdev, data->io_remap_addr);
+ spi_unregister_master(data->master);
+
+ return 0;
+}
+#ifdef CONFIG_PM
+static int pch_spi_pd_suspend(struct platform_device *pd_dev,
+ pm_message_t state)
+{
+ u8 count;
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(pd_dev);
+
+ dev_dbg(&pd_dev->dev, "%s ENTRY\n", __func__);
+
+ if (!board_dat) {
+ dev_err(&pd_dev->dev,
+ "%s pci_get_drvdata returned NULL\n", __func__);
+ return -EFAULT;
+ }
+
+	/* wait until the current message is processed:
+	 * only then can the transfer be suspended */
+ count = 255;
+ while ((--count) > 0) {
+ if (!(data->bcurrent_msg_processing))
+ break;
+ msleep(PCH_SLEEP_TIME);
+ }
+
+ /* Free IRQ */
+ if (data->irq_reg_sts) {
+ /* disable all interrupts */
+ pch_spi_setclr_reg(data->master, PCH_SPCR, 0, PCH_ALL);
+ pch_spi_reset(data->master);
+ free_irq(board_dat->pdev->irq, data);
+
+ data->irq_reg_sts = false;
+ dev_dbg(&pd_dev->dev,
+ "%s free_irq invoked successfully.\n", __func__);
+ }
+
+ return 0;
+}
+
+static int pch_spi_pd_resume(struct platform_device *pd_dev)
+{
+ struct pch_spi_board_data *board_dat = dev_get_platdata(&pd_dev->dev);
+ struct pch_spi_data *data = platform_get_drvdata(pd_dev);
+ int retval;
+
+ if (!board_dat) {
+ dev_err(&pd_dev->dev,
+ "%s pci_get_drvdata returned NULL\n", __func__);
+ return -EFAULT;
+ }
+
+ if (!data->irq_reg_sts) {
+ /* register IRQ */
+ retval = request_irq(board_dat->pdev->irq, pch_spi_handler,
+ IRQF_SHARED, KBUILD_MODNAME, data);
+ if (retval < 0) {
+ dev_err(&pd_dev->dev,
+ "%s request_irq failed\n", __func__);
+ return retval;
+ }
+
+ /* reset PCH SPI h/w */
+ pch_spi_reset(data->master);
+ pch_spi_set_master_mode(data->master);
+ data->irq_reg_sts = true;
+ }
+ return 0;
+}
+#else
+#define pch_spi_pd_suspend NULL
+#define pch_spi_pd_resume NULL
+#endif
+
+static struct platform_driver pch_spi_pd_driver = {
+ .driver = {
+ .name = "pch-spi",
+ },
+ .probe = pch_spi_pd_probe,
+ .remove = pch_spi_pd_remove,
+ .suspend = pch_spi_pd_suspend,
+ .resume = pch_spi_pd_resume
+};
+
+static int pch_spi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct pch_spi_board_data *board_dat;
+ struct platform_device *pd_dev = NULL;
+ int retval;
+ int i;
+ struct pch_pd_dev_save *pd_dev_save;
+
+ pd_dev_save = kzalloc(sizeof(struct pch_pd_dev_save), GFP_KERNEL);
+ if (!pd_dev_save)
+ return -ENOMEM;
+
+ board_dat = kzalloc(sizeof(struct pch_spi_board_data), GFP_KERNEL);
+ if (!board_dat) {
+ retval = -ENOMEM;
+ goto err_no_mem;
+ }
+
+ retval = pci_request_regions(pdev, KBUILD_MODNAME);
+ if (retval) {
+ dev_err(&pdev->dev, "%s request_region failed\n", __func__);
+ goto pci_request_regions;
+ }
+
+ board_dat->pdev = pdev;
+ board_dat->num = id->driver_data;
+ pd_dev_save->num = id->driver_data;
+ pd_dev_save->board_dat = board_dat;
+
+ retval = pci_enable_device(pdev);
+ if (retval) {
+ dev_err(&pdev->dev, "%s pci_enable_device failed\n", __func__);
+ goto pci_enable_device;
+ }
+
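+	/* create one child platform device ("pch-spi") per SPI channel */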
+ for (i = 0; i < board_dat->num; i++) {
+ pd_dev = platform_device_alloc("pch-spi", i);
+ if (!pd_dev) {
+ dev_err(&pdev->dev, "platform_device_alloc failed\n");
+ retval = -ENOMEM;
+ goto err_platform_device;
+ }
+ pd_dev_save->pd_save[i] = pd_dev;
+ pd_dev->dev.parent = &pdev->dev;
+
+ retval = platform_device_add_data(pd_dev, board_dat,
+ sizeof(*board_dat));
+ if (retval) {
+ dev_err(&pdev->dev,
+ "platform_device_add_data failed\n");
+ platform_device_put(pd_dev);
+ goto err_platform_device;
+ }
+
+ retval = platform_device_add(pd_dev);
+ if (retval) {
+ dev_err(&pdev->dev, "platform_device_add failed\n");
+ platform_device_put(pd_dev);
+ goto err_platform_device;
+ }
+ }
+
+ pci_set_drvdata(pdev, pd_dev_save);
+
+ return 0;
+
+err_platform_device:
+ while (--i >= 0)
+ platform_device_unregister(pd_dev_save->pd_save[i]);
+ pci_disable_device(pdev);
+pci_enable_device:
+ pci_release_regions(pdev);
+pci_request_regions:
+ kfree(board_dat);
+err_no_mem:
+ kfree(pd_dev_save);
+
+ return retval;
+}
+
+static void pch_spi_remove(struct pci_dev *pdev)
+{
+ int i;
+ struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s ENTRY:pdev=%p\n", __func__, pdev);
+
+ for (i = 0; i < pd_dev_save->num; i++)
+ platform_device_unregister(pd_dev_save->pd_save[i]);
+
+ pci_disable_device(pdev);
+ pci_release_regions(pdev);
+ kfree(pd_dev_save->board_dat);
+ kfree(pd_dev_save);
+}
+
+#ifdef CONFIG_PM
+static int pch_spi_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+ int retval;
+ struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
+
+ dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+
+ pd_dev_save->board_dat->suspend_sts = true;
+
+ /* save config space */
+ retval = pci_save_state(pdev);
+ if (retval == 0) {
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+ pci_disable_device(pdev);
+ pci_set_power_state(pdev, PCI_D3hot);
+ } else {
+ dev_err(&pdev->dev, "%s pci_save_state failed\n", __func__);
+ }
+
+ return retval;
+}
+
+static int pch_spi_resume(struct pci_dev *pdev)
+{
+ int retval;
+ struct pch_pd_dev_save *pd_dev_save = pci_get_drvdata(pdev);
+ dev_dbg(&pdev->dev, "%s ENTRY\n", __func__);
+
+ pci_set_power_state(pdev, PCI_D0);
+ pci_restore_state(pdev);
+
+ retval = pci_enable_device(pdev);
+ if (retval < 0) {
+ dev_err(&pdev->dev,
+ "%s pci_enable_device failed\n", __func__);
+ } else {
+ pci_enable_wake(pdev, PCI_D3hot, 0);
+
+ /* set suspend status to false */
+ pd_dev_save->board_dat->suspend_sts = false;
+ }
+
+ return retval;
+}
+#else
+#define pch_spi_suspend NULL
+#define pch_spi_resume NULL
+
+#endif
+
+static struct pci_driver pch_spi_pcidev_driver = {
+ .name = "pch_spi",
+ .id_table = pch_spi_pcidev_id,
+ .probe = pch_spi_probe,
+ .remove = pch_spi_remove,
+ .suspend = pch_spi_suspend,
+ .resume = pch_spi_resume,
+};
+
+static int __init pch_spi_init(void)
+{
+ int ret;
+ ret = platform_driver_register(&pch_spi_pd_driver);
+ if (ret)
+ return ret;
+
+ ret = pci_register_driver(&pch_spi_pcidev_driver);
+ if (ret) {
+ platform_driver_unregister(&pch_spi_pd_driver);
+ return ret;
+ }
+
+ return 0;
+}
+module_init(pch_spi_init);
+
+static void __exit pch_spi_exit(void)
+{
+ pci_unregister_driver(&pch_spi_pcidev_driver);
+ platform_driver_unregister(&pch_spi_pd_driver);
+}
+module_exit(pch_spi_exit);
+
+module_param(use_dma, int, 0644);
+MODULE_PARM_DESC(use_dma,
+		 "pass 1 to use DMA for data transfers, 0 otherwise (default 1)");
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Intel EG20T PCH/LAPIS Semiconductor ML7xxx IOH SPI Driver");
+MODULE_DEVICE_TABLE(pci, pch_spi_pcidev_id);
+
diff --git a/drivers/spi/spi-txx9.c b/drivers/spi/spi-txx9.c
new file mode 100644
index 000000000..9190124b6
--- /dev/null
+++ b/drivers/spi/spi-txx9.c
@@ -0,0 +1,444 @@
+/*
+ * TXx9 SPI controller driver.
+ *
+ * Based on linux/arch/mips/tx4938/toshiba_rbtx4938/spi_txx9.c
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * 2003-2005 (c) MontaVista Software, Inc. This file is licensed under the
+ * terms of the GNU General Public License version 2. This program is
+ * licensed "as is" without any warranty of any kind, whether express
+ * or implied.
+ *
+ * Support for TX4938 in 2.6 - Manish Lachwani (mlachwani@mvista.com)
+ *
+ * Convert to generic SPI framework - Atsushi Nemoto (anemo@mba.ocn.ne.jp)
+ */
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/spi/spi.h>
+#include <linux/err.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+
+
+#define SPI_FIFO_SIZE 4
+#define SPI_MAX_DIVIDER 0xff /* Max. value for SPCR1.SER */
+#define SPI_MIN_DIVIDER 1 /* Min. value for SPCR1.SER */
+
+#define TXx9_SPMCR 0x00
+#define TXx9_SPCR0 0x04
+#define TXx9_SPCR1 0x08
+#define TXx9_SPFS 0x0c
+#define TXx9_SPSR 0x14
+#define TXx9_SPDR 0x18
+
+/* SPMCR : SPI Master Control */
+#define TXx9_SPMCR_OPMODE 0xc0
+#define TXx9_SPMCR_CONFIG 0x40
+#define TXx9_SPMCR_ACTIVE 0x80
+#define TXx9_SPMCR_SPSTP 0x02
+#define TXx9_SPMCR_BCLR 0x01
+
+/* SPCR0 : SPI Control 0 */
+#define TXx9_SPCR0_TXIFL_MASK 0xc000
+#define TXx9_SPCR0_RXIFL_MASK 0x3000
+#define TXx9_SPCR0_SIDIE 0x0800
+#define TXx9_SPCR0_SOEIE 0x0400
+#define TXx9_SPCR0_RBSIE 0x0200
+#define TXx9_SPCR0_TBSIE 0x0100
+#define TXx9_SPCR0_IFSPSE 0x0010
+#define TXx9_SPCR0_SBOS 0x0004
+#define TXx9_SPCR0_SPHA 0x0002
+#define TXx9_SPCR0_SPOL 0x0001
+
+/* SPSR : SPI Status */
+#define TXx9_SPSR_TBSI 0x8000
+#define TXx9_SPSR_RBSI 0x4000
+#define TXx9_SPSR_TBS_MASK 0x3800
+#define TXx9_SPSR_RBS_MASK 0x0700
+#define TXx9_SPSR_SPOE 0x0080
+#define TXx9_SPSR_IFSD 0x0008
+#define TXx9_SPSR_SIDLE 0x0004
+#define TXx9_SPSR_STRDY 0x0002
+#define TXx9_SPSR_SRRDY 0x0001
+
+
+struct txx9spi {
+ struct workqueue_struct *workqueue;
+ struct work_struct work;
+ spinlock_t lock; /* protect 'queue' */
+ struct list_head queue;
+ wait_queue_head_t waitq;
+ void __iomem *membase;
+ int baseclk;
+ struct clk *clk;
+ int last_chipselect;
+ int last_chipselect_val;
+};
+
+static u32 txx9spi_rd(struct txx9spi *c, int reg)
+{
+ return __raw_readl(c->membase + reg);
+}
+static void txx9spi_wr(struct txx9spi *c, u32 val, int reg)
+{
+ __raw_writel(val, c->membase + reg);
+}
+
+static void txx9spi_cs_func(struct spi_device *spi, struct txx9spi *c,
+ int on, unsigned int cs_delay)
+{
+ int val = (spi->mode & SPI_CS_HIGH) ? on : !on;
+
+ if (on) {
+ /* deselect the chip with cs_change hint in last transfer */
+ if (c->last_chipselect >= 0)
+ gpio_set_value(c->last_chipselect,
+ !c->last_chipselect_val);
+ c->last_chipselect = spi->chip_select;
+ c->last_chipselect_val = val;
+ } else {
+ c->last_chipselect = -1;
+ ndelay(cs_delay); /* CS Hold Time */
+ }
+ gpio_set_value(spi->chip_select, val);
+ ndelay(cs_delay); /* CS Setup Time / CS Recovery Time */
+}
+
+static int txx9spi_setup(struct spi_device *spi)
+{
+ struct txx9spi *c = spi_master_get_devdata(spi->master);
+
+ if (!spi->max_speed_hz)
+ return -EINVAL;
+
+ if (gpio_direction_output(spi->chip_select,
+ !(spi->mode & SPI_CS_HIGH))) {
+ dev_err(&spi->dev, "Cannot setup GPIO for chipselect.\n");
+ return -EINVAL;
+ }
+
+ /* deselect chip */
+ spin_lock(&c->lock);
+ txx9spi_cs_func(spi, c, 0, (NSEC_PER_SEC / 2) / spi->max_speed_hz);
+ spin_unlock(&c->lock);
+
+ return 0;
+}
+
+static irqreturn_t txx9spi_interrupt(int irq, void *dev_id)
+{
+ struct txx9spi *c = dev_id;
+
+ /* disable rx intr */
+ txx9spi_wr(c, txx9spi_rd(c, TXx9_SPCR0) & ~TXx9_SPCR0_RBSIE,
+ TXx9_SPCR0);
+ wake_up(&c->waitq);
+ return IRQ_HANDLED;
+}
+
+static void txx9spi_work_one(struct txx9spi *c, struct spi_message *m)
+{
+ struct spi_device *spi = m->spi;
+ struct spi_transfer *t;
+ unsigned int cs_delay;
+ unsigned int cs_change = 1;
+ int status = 0;
+ u32 mcr;
+ u32 prev_speed_hz = 0;
+ u8 prev_bits_per_word = 0;
+
+ /* CS setup/hold/recovery time in nsec */
+ cs_delay = 100 + (NSEC_PER_SEC / 2) / spi->max_speed_hz;
+
+ mcr = txx9spi_rd(c, TXx9_SPMCR);
+ if (unlikely((mcr & TXx9_SPMCR_OPMODE) == TXx9_SPMCR_ACTIVE)) {
+ dev_err(&spi->dev, "Bad mode.\n");
+ status = -EIO;
+ goto exit;
+ }
+ mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
+
+ /* enter config mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+ txx9spi_wr(c, TXx9_SPCR0_SBOS
+ | ((spi->mode & SPI_CPOL) ? TXx9_SPCR0_SPOL : 0)
+ | ((spi->mode & SPI_CPHA) ? TXx9_SPCR0_SPHA : 0)
+ | 0x08,
+ TXx9_SPCR0);
+
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ const void *txbuf = t->tx_buf;
+ void *rxbuf = t->rx_buf;
+ u32 data;
+ unsigned int len = t->len;
+ unsigned int wsize;
+ u32 speed_hz = t->speed_hz ? : spi->max_speed_hz;
+ u8 bits_per_word = t->bits_per_word;
+
+ wsize = bits_per_word >> 3; /* in bytes */
+
+ if (prev_speed_hz != speed_hz
+ || prev_bits_per_word != bits_per_word) {
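+			/* SPCR1.SER divider: SCLK = baseclk / (n + 1) */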
+ int n = DIV_ROUND_UP(c->baseclk, speed_hz) - 1;
+
+ n = clamp(n, SPI_MIN_DIVIDER, SPI_MAX_DIVIDER);
+ /* enter config mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR,
+ TXx9_SPMCR);
+ txx9spi_wr(c, (n << 8) | bits_per_word, TXx9_SPCR1);
+ /* enter active mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_ACTIVE, TXx9_SPMCR);
+
+ prev_speed_hz = speed_hz;
+ prev_bits_per_word = bits_per_word;
+ }
+
+ if (cs_change)
+ txx9spi_cs_func(spi, c, 1, cs_delay);
+ cs_change = t->cs_change;
+ while (len) {
+ unsigned int count = SPI_FIFO_SIZE;
+ int i;
+ u32 cr0;
+
+ if (len < count * wsize)
+ count = len / wsize;
+ /* now tx must be idle... */
+ while (!(txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_SIDLE))
+ cpu_relax();
+ cr0 = txx9spi_rd(c, TXx9_SPCR0);
+ cr0 &= ~TXx9_SPCR0_RXIFL_MASK;
+ cr0 |= (count - 1) << 12;
+ /* enable rx intr */
+ cr0 |= TXx9_SPCR0_RBSIE;
+ txx9spi_wr(c, cr0, TXx9_SPCR0);
+ /* send */
+ for (i = 0; i < count; i++) {
+ if (txbuf) {
+ data = (wsize == 1)
+ ? *(const u8 *)txbuf
+ : *(const u16 *)txbuf;
+ txx9spi_wr(c, data, TXx9_SPDR);
+ txbuf += wsize;
+ } else
+ txx9spi_wr(c, 0, TXx9_SPDR);
+ }
+ /* wait all rx data */
+ wait_event(c->waitq,
+ txx9spi_rd(c, TXx9_SPSR) & TXx9_SPSR_RBSI);
+ /* receive */
+ for (i = 0; i < count; i++) {
+ data = txx9spi_rd(c, TXx9_SPDR);
+ if (rxbuf) {
+ if (wsize == 1)
+ *(u8 *)rxbuf = data;
+ else
+ *(u16 *)rxbuf = data;
+ rxbuf += wsize;
+ }
+ }
+ len -= count * wsize;
+ }
+ m->actual_length += t->len;
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ if (!cs_change)
+ continue;
+ if (t->transfer_list.next == &m->transfers)
+ break;
+ /* sometimes a short mid-message deselect of the chip
+ * may be needed to terminate a mode or command
+ */
+ txx9spi_cs_func(spi, c, 0, cs_delay);
+ }
+
+exit:
+ m->status = status;
+ if (m->complete)
+ m->complete(m->context);
+
+ /* normally deactivate chipselect ... unless no error and
+ * cs_change has hinted that the next message will probably
+ * be for this chip too.
+ */
+ if (!(status == 0 && cs_change))
+ txx9spi_cs_func(spi, c, 0, cs_delay);
+
+ /* enter config mode */
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+}
+
+static void txx9spi_work(struct work_struct *work)
+{
+ struct txx9spi *c = container_of(work, struct txx9spi, work);
+ unsigned long flags;
+
+ spin_lock_irqsave(&c->lock, flags);
+ while (!list_empty(&c->queue)) {
+ struct spi_message *m;
+
+ m = container_of(c->queue.next, struct spi_message, queue);
+ list_del_init(&m->queue);
+ spin_unlock_irqrestore(&c->lock, flags);
+
+ txx9spi_work_one(c, m);
+
+ spin_lock_irqsave(&c->lock, flags);
+ }
+ spin_unlock_irqrestore(&c->lock, flags);
+}
+
+static int txx9spi_transfer(struct spi_device *spi, struct spi_message *m)
+{
+ struct spi_master *master = spi->master;
+ struct txx9spi *c = spi_master_get_devdata(master);
+ struct spi_transfer *t;
+ unsigned long flags;
+
+ m->actual_length = 0;
+
+ /* check each transfer's parameters */
+ list_for_each_entry(t, &m->transfers, transfer_list) {
+ if (!t->tx_buf && !t->rx_buf && t->len)
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&c->lock, flags);
+ list_add_tail(&m->queue, &c->queue);
+ queue_work(c->workqueue, &c->work);
+ spin_unlock_irqrestore(&c->lock, flags);
+
+ return 0;
+}
+
+static int txx9spi_probe(struct platform_device *dev)
+{
+ struct spi_master *master;
+ struct txx9spi *c;
+ struct resource *res;
+ int ret = -ENODEV;
+ u32 mcr;
+ int irq;
+
+ master = spi_alloc_master(&dev->dev, sizeof(*c));
+ if (!master)
+ return ret;
+ c = spi_master_get_devdata(master);
+ platform_set_drvdata(dev, master);
+
+ INIT_WORK(&c->work, txx9spi_work);
+ spin_lock_init(&c->lock);
+ INIT_LIST_HEAD(&c->queue);
+ init_waitqueue_head(&c->waitq);
+
+ c->clk = devm_clk_get(&dev->dev, "spi-baseclk");
+ if (IS_ERR(c->clk)) {
+ ret = PTR_ERR(c->clk);
+ c->clk = NULL;
+ goto exit;
+ }
+ ret = clk_enable(c->clk);
+ if (ret) {
+ c->clk = NULL;
+ goto exit;
+ }
+ c->baseclk = clk_get_rate(c->clk);
+ master->min_speed_hz = DIV_ROUND_UP(c->baseclk, SPI_MAX_DIVIDER + 1);
+ master->max_speed_hz = c->baseclk / (SPI_MIN_DIVIDER + 1);
+
+ res = platform_get_resource(dev, IORESOURCE_MEM, 0);
+ c->membase = devm_ioremap_resource(&dev->dev, res);
+ if (IS_ERR(c->membase))
+ goto exit_busy;
+
+ /* enter config mode */
+ mcr = txx9spi_rd(c, TXx9_SPMCR);
+ mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR);
+ txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR);
+
+ irq = platform_get_irq(dev, 0);
+ if (irq < 0)
+ goto exit_busy;
+ ret = devm_request_irq(&dev->dev, irq, txx9spi_interrupt, 0,
+ "spi_txx9", c);
+ if (ret)
+ goto exit;
+
+ c->workqueue = create_singlethread_workqueue(
+ dev_name(master->dev.parent));
+ if (!c->workqueue)
+ goto exit_busy;
+ c->last_chipselect = -1;
+
+ dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n",
+ (unsigned long long)res->start, irq,
+ (c->baseclk + 500000) / 1000000);
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CS_HIGH | SPI_CPOL | SPI_CPHA;
+
+ master->bus_num = dev->id;
+ master->setup = txx9spi_setup;
+ master->transfer = txx9spi_transfer;
+ master->num_chipselect = (u16)UINT_MAX; /* any GPIO numbers */
+ master->bits_per_word_mask = SPI_BPW_MASK(8) | SPI_BPW_MASK(16);
+
+ ret = devm_spi_register_master(&dev->dev, master);
+ if (ret)
+ goto exit;
+ return 0;
+exit_busy:
+ ret = -EBUSY;
+exit:
+ if (c->workqueue)
+ destroy_workqueue(c->workqueue);
+ clk_disable(c->clk);
+ spi_master_put(master);
+ return ret;
+}
+
+static int txx9spi_remove(struct platform_device *dev)
+{
+ struct spi_master *master = platform_get_drvdata(dev);
+ struct txx9spi *c = spi_master_get_devdata(master);
+
+ destroy_workqueue(c->workqueue);
+ clk_disable(c->clk);
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:spi_txx9");
+
+static struct platform_driver txx9spi_driver = {
+ .probe = txx9spi_probe,
+ .remove = txx9spi_remove,
+ .driver = {
+ .name = "spi_txx9",
+ },
+};
+
+static int __init txx9spi_init(void)
+{
+ return platform_driver_register(&txx9spi_driver);
+}
+subsys_initcall(txx9spi_init);
+
+static void __exit txx9spi_exit(void)
+{
+ platform_driver_unregister(&txx9spi_driver);
+}
+module_exit(txx9spi_exit);
+
+MODULE_DESCRIPTION("TXx9 SPI Driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-xcomm.c b/drivers/spi/spi-xcomm.c
new file mode 100644
index 000000000..bb478dccf
--- /dev/null
+++ b/drivers/spi/spi-xcomm.c
@@ -0,0 +1,253 @@
+/*
+ * Analog Devices AD-FMCOMMS1-EBZ board I2C-SPI bridge driver
+ *
+ * Copyright 2012 Analog Devices Inc.
+ * Author: Lars-Peter Clausen <lars@metafoo.de>
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/spi/spi.h>
+#include <asm/unaligned.h>
+
+#define SPI_XCOMM_SETTINGS_LEN_OFFSET 10
+#define SPI_XCOMM_SETTINGS_3WIRE BIT(6)
+#define SPI_XCOMM_SETTINGS_CS_HIGH BIT(5)
+#define SPI_XCOMM_SETTINGS_SAMPLE_END BIT(4)
+#define SPI_XCOMM_SETTINGS_CPHA BIT(3)
+#define SPI_XCOMM_SETTINGS_CPOL BIT(2)
+#define SPI_XCOMM_SETTINGS_CLOCK_DIV_MASK 0x3
+#define SPI_XCOMM_SETTINGS_CLOCK_DIV_64 0x2
+#define SPI_XCOMM_SETTINGS_CLOCK_DIV_16 0x1
+#define SPI_XCOMM_SETTINGS_CLOCK_DIV_4 0x0
+
+#define SPI_XCOMM_CMD_UPDATE_CONFIG 0x03
+#define SPI_XCOMM_CMD_WRITE 0x04
+
+#define SPI_XCOMM_CLOCK 48000000
+
+struct spi_xcomm {
+ struct i2c_client *i2c;
+
+ uint16_t settings;
+ uint16_t chipselect;
+
+ unsigned int current_speed;
+
+ uint8_t buf[63];
+};
+
+static int spi_xcomm_sync_config(struct spi_xcomm *spi_xcomm, unsigned int len)
+{
+ uint16_t settings;
+ uint8_t *buf = spi_xcomm->buf;
+
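+	/* config message: command byte, 16-bit settings, 16-bit CS mask */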
+ settings = spi_xcomm->settings;
+ settings |= len << SPI_XCOMM_SETTINGS_LEN_OFFSET;
+
+ buf[0] = SPI_XCOMM_CMD_UPDATE_CONFIG;
+ put_unaligned_be16(settings, &buf[1]);
+ put_unaligned_be16(spi_xcomm->chipselect, &buf[3]);
+
+ return i2c_master_send(spi_xcomm->i2c, buf, 5);
+}
+
+static void spi_xcomm_chipselect(struct spi_xcomm *spi_xcomm,
+ struct spi_device *spi, int is_active)
+{
+ unsigned long cs = spi->chip_select;
+ uint16_t chipselect = spi_xcomm->chipselect;
+
+ if (is_active)
+ chipselect |= BIT(cs);
+ else
+ chipselect &= ~BIT(cs);
+
+ spi_xcomm->chipselect = chipselect;
+}
+
+static int spi_xcomm_setup_transfer(struct spi_xcomm *spi_xcomm,
+ struct spi_device *spi, struct spi_transfer *t, unsigned int *settings)
+{
+ if (t->len > 62)
+ return -EINVAL;
+
+ if (t->speed_hz != spi_xcomm->current_speed) {
+ unsigned int divider;
+
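+		/* map the required divisor onto the bridge's fixed
+		 * /4, /16 and /64 prescalers */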
+ divider = DIV_ROUND_UP(SPI_XCOMM_CLOCK, t->speed_hz);
+ if (divider >= 64)
+ *settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_64;
+ else if (divider >= 16)
+ *settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_16;
+ else
+ *settings |= SPI_XCOMM_SETTINGS_CLOCK_DIV_4;
+
+ spi_xcomm->current_speed = t->speed_hz;
+ }
+
+ if (spi->mode & SPI_CPOL)
+ *settings |= SPI_XCOMM_SETTINGS_CPOL;
+ else
+ *settings &= ~SPI_XCOMM_SETTINGS_CPOL;
+
+ if (spi->mode & SPI_CPHA)
+ *settings &= ~SPI_XCOMM_SETTINGS_CPHA;
+ else
+ *settings |= SPI_XCOMM_SETTINGS_CPHA;
+
+ if (spi->mode & SPI_3WIRE)
+ *settings |= SPI_XCOMM_SETTINGS_3WIRE;
+ else
+ *settings &= ~SPI_XCOMM_SETTINGS_3WIRE;
+
+ return 0;
+}
+
+static int spi_xcomm_txrx_bufs(struct spi_xcomm *spi_xcomm,
+ struct spi_device *spi, struct spi_transfer *t)
+{
+ int ret;
+
+ if (t->tx_buf) {
+ spi_xcomm->buf[0] = SPI_XCOMM_CMD_WRITE;
+ memcpy(spi_xcomm->buf + 1, t->tx_buf, t->len);
+
+ ret = i2c_master_send(spi_xcomm->i2c, spi_xcomm->buf, t->len + 1);
+ if (ret < 0)
+ return ret;
+ else if (ret != t->len + 1)
+ return -EIO;
+ } else if (t->rx_buf) {
+ ret = i2c_master_recv(spi_xcomm->i2c, t->rx_buf, t->len);
+ if (ret < 0)
+ return ret;
+ else if (ret != t->len)
+ return -EIO;
+ }
+
+ return t->len;
+}
+
+static int spi_xcomm_transfer_one(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_xcomm *spi_xcomm = spi_master_get_devdata(master);
+ unsigned int settings = spi_xcomm->settings;
+ struct spi_device *spi = msg->spi;
+ unsigned cs_change = 0;
+ struct spi_transfer *t;
+ bool is_first = true;
+ int status = 0;
+ bool is_last;
+
+ spi_xcomm_chipselect(spi_xcomm, spi, true);
+
+ list_for_each_entry(t, &msg->transfers, transfer_list) {
+
+ if (!t->tx_buf && !t->rx_buf && t->len) {
+ status = -EINVAL;
+ break;
+ }
+
+ status = spi_xcomm_setup_transfer(spi_xcomm, spi, t, &settings);
+ if (status < 0)
+ break;
+
+ is_last = list_is_last(&t->transfer_list, &msg->transfers);
+ cs_change = t->cs_change;
+
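+		/*
+		 * BIT(5) (SPI_XCOMM_SETTINGS_CS_HIGH) keeps CS asserted
+		 * after this transfer: set mid-message unless cs_change
+		 * requests a toggle, and on the final transfer only when
+		 * cs_change asks for it.
+		 */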
+ if (cs_change ^ is_last)
+ settings |= BIT(5);
+ else
+ settings &= ~BIT(5);
+
+ if (t->rx_buf) {
+ spi_xcomm->settings = settings;
+ status = spi_xcomm_sync_config(spi_xcomm, t->len);
+ if (status < 0)
+ break;
+ } else if (settings != spi_xcomm->settings || is_first) {
+ spi_xcomm->settings = settings;
+ status = spi_xcomm_sync_config(spi_xcomm, 0);
+ if (status < 0)
+ break;
+ }
+
+ if (t->len) {
+ status = spi_xcomm_txrx_bufs(spi_xcomm, spi, t);
+
+ if (status < 0)
+ break;
+
+ if (status > 0)
+ msg->actual_length += status;
+ }
+ status = 0;
+
+ if (t->delay_usecs)
+ udelay(t->delay_usecs);
+
+ is_first = false;
+ }
+
+ if (status != 0 || !cs_change)
+ spi_xcomm_chipselect(spi_xcomm, spi, false);
+
+ msg->status = status;
+ spi_finalize_current_message(master);
+
+ return status;
+}
+
+static int spi_xcomm_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ struct spi_xcomm *spi_xcomm;
+ struct spi_master *master;
+ int ret;
+
+ master = spi_alloc_master(&i2c->dev, sizeof(*spi_xcomm));
+ if (!master)
+ return -ENOMEM;
+
+ spi_xcomm = spi_master_get_devdata(master);
+ spi_xcomm->i2c = i2c;
+
+ master->num_chipselect = 16;
+ master->mode_bits = SPI_CPHA | SPI_CPOL | SPI_3WIRE;
+ master->bits_per_word_mask = SPI_BPW_MASK(8);
+ master->flags = SPI_MASTER_HALF_DUPLEX;
+ master->transfer_one_message = spi_xcomm_transfer_one;
+ master->dev.of_node = i2c->dev.of_node;
+ i2c_set_clientdata(i2c, master);
+
+ ret = devm_spi_register_master(&i2c->dev, master);
+ if (ret < 0)
+ spi_master_put(master);
+
+ return ret;
+}
+
+static const struct i2c_device_id spi_xcomm_ids[] = {
+ { "spi-xcomm" },
+ { },
+};
+
+static struct i2c_driver spi_xcomm_driver = {
+ .driver = {
+ .name = "spi-xcomm",
+ .owner = THIS_MODULE,
+ },
+ .id_table = spi_xcomm_ids,
+ .probe = spi_xcomm_probe,
+};
+module_i2c_driver(spi_xcomm_driver);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("Analog Devices AD-FMCOMMS1-EBZ board I2C-SPI bridge driver");
diff --git a/drivers/spi/spi-xilinx.c b/drivers/spi/spi-xilinx.c
new file mode 100644
index 000000000..133f53a9c
--- /dev/null
+++ b/drivers/spi/spi-xilinx.c
@@ -0,0 +1,509 @@
+/*
+ * Xilinx SPI controller driver (master mode only)
+ *
+ * Author: MontaVista Software, Inc.
+ * source@mvista.com
+ *
+ * Copyright (c) 2010 Secret Lab Technologies, Ltd.
+ * Copyright (c) 2009 Intel Corporation
+ * 2002-2007 (c) MontaVista Software, Inc.
+
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+#include <linux/spi/xilinx_spi.h>
+#include <linux/io.h>
+
+#define XILINX_SPI_MAX_CS 32
+
+#define XILINX_SPI_NAME "xilinx_spi"
+
+/* Register definitions as per "OPB Serial Peripheral Interface (SPI) (v1.00e)
+ * Product Specification", DS464
+ */
+#define XSPI_CR_OFFSET 0x60 /* Control Register */
+
+#define XSPI_CR_LOOP 0x01
+#define XSPI_CR_ENABLE 0x02
+#define XSPI_CR_MASTER_MODE 0x04
+#define XSPI_CR_CPOL 0x08
+#define XSPI_CR_CPHA 0x10
+#define XSPI_CR_MODE_MASK (XSPI_CR_CPHA | XSPI_CR_CPOL | \
+ XSPI_CR_LSB_FIRST | XSPI_CR_LOOP)
+#define XSPI_CR_TXFIFO_RESET 0x20
+#define XSPI_CR_RXFIFO_RESET 0x40
+#define XSPI_CR_MANUAL_SSELECT 0x80
+#define XSPI_CR_TRANS_INHIBIT 0x100
+#define XSPI_CR_LSB_FIRST 0x200
+
+#define XSPI_SR_OFFSET 0x64 /* Status Register */
+
+#define XSPI_SR_RX_EMPTY_MASK 0x01 /* Receive FIFO is empty */
+#define XSPI_SR_RX_FULL_MASK 0x02 /* Receive FIFO is full */
+#define XSPI_SR_TX_EMPTY_MASK 0x04 /* Transmit FIFO is empty */
+#define XSPI_SR_TX_FULL_MASK 0x08 /* Transmit FIFO is full */
+#define XSPI_SR_MODE_FAULT_MASK 0x10 /* Mode fault error */
+
+#define XSPI_TXD_OFFSET 0x68 /* Data Transmit Register */
+#define XSPI_RXD_OFFSET 0x6c /* Data Receive Register */
+
+#define XSPI_SSR_OFFSET 0x70 /* 32-bit Slave Select Register */
+
+/* Register definitions as per "OPB IPIF (v3.01c) Product Specification", DS414
+ * IPIF registers are 32 bit
+ */
+#define XIPIF_V123B_DGIER_OFFSET 0x1c /* IPIF global int enable reg */
+#define XIPIF_V123B_GINTR_ENABLE 0x80000000
+
+#define XIPIF_V123B_IISR_OFFSET 0x20 /* IPIF interrupt status reg */
+#define XIPIF_V123B_IIER_OFFSET 0x28 /* IPIF interrupt enable reg */
+
+#define XSPI_INTR_MODE_FAULT 0x01 /* Mode fault error */
+#define XSPI_INTR_SLAVE_MODE_FAULT 0x02 /* Selected as slave while
+ * disabled */
+#define XSPI_INTR_TX_EMPTY 0x04 /* TxFIFO is empty */
+#define XSPI_INTR_TX_UNDERRUN 0x08 /* TxFIFO was underrun */
+#define XSPI_INTR_RX_FULL 0x10 /* RxFIFO is full */
+#define XSPI_INTR_RX_OVERRUN 0x20 /* RxFIFO was overrun */
+#define XSPI_INTR_TX_HALF_EMPTY 0x40 /* TxFIFO is half empty */
+
+#define XIPIF_V123B_RESETR_OFFSET 0x40 /* IPIF reset register */
+#define XIPIF_V123B_RESET_MASK 0x0a /* the value to write */
+
+struct xilinx_spi {
+ /* bitbang has to be first */
+ struct spi_bitbang bitbang;
+ struct completion done;
+ void __iomem *regs; /* virt. address of the control registers */
+
+ int irq;
+
+	u8 *rx_ptr;		/* pointer in the Rx buffer */
+	const u8 *tx_ptr;	/* pointer in the Tx buffer */
+ u8 bytes_per_word;
+ int buffer_size; /* buffer size in words */
+	u32 cs_inactive;	/* Level of the CS pins when inactive */
+ unsigned int (*read_fn)(void __iomem *);
+ void (*write_fn)(u32, void __iomem *);
+};
+
+static void xspi_write32(u32 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+}
+
+static unsigned int xspi_read32(void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static void xspi_write32_be(u32 val, void __iomem *addr)
+{
+ iowrite32be(val, addr);
+}
+
+static unsigned int xspi_read32_be(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+static void xilinx_spi_tx(struct xilinx_spi *xspi)
+{
+ u32 data = 0;
+
+ if (!xspi->tx_ptr) {
+ xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+ return;
+ }
+
+ switch (xspi->bytes_per_word) {
+ case 1:
+ data = *(u8 *)(xspi->tx_ptr);
+ break;
+ case 2:
+ data = *(u16 *)(xspi->tx_ptr);
+ break;
+ case 4:
+ data = *(u32 *)(xspi->tx_ptr);
+ break;
+ }
+
+ xspi->write_fn(data, xspi->regs + XSPI_TXD_OFFSET);
+ xspi->tx_ptr += xspi->bytes_per_word;
+}
+
+static void xilinx_spi_rx(struct xilinx_spi *xspi)
+{
+ u32 data = xspi->read_fn(xspi->regs + XSPI_RXD_OFFSET);
+
+ if (!xspi->rx_ptr)
+ return;
+
+ switch (xspi->bytes_per_word) {
+ case 1:
+ *(u8 *)(xspi->rx_ptr) = data;
+ break;
+ case 2:
+ *(u16 *)(xspi->rx_ptr) = data;
+ break;
+ case 4:
+ *(u32 *)(xspi->rx_ptr) = data;
+ break;
+ }
+
+ xspi->rx_ptr += xspi->bytes_per_word;
+}
+
+static void xspi_init_hw(struct xilinx_spi *xspi)
+{
+ void __iomem *regs_base = xspi->regs;
+
+ /* Reset the SPI device */
+ xspi->write_fn(XIPIF_V123B_RESET_MASK,
+ regs_base + XIPIF_V123B_RESETR_OFFSET);
+ /* Enable the transmit empty interrupt, which we use to determine
+ * progress on the transmission.
+ */
+ xspi->write_fn(XSPI_INTR_TX_EMPTY,
+ regs_base + XIPIF_V123B_IIER_OFFSET);
+ /* Disable the global IPIF interrupt */
+ xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
+ /* Deselect the slave on the SPI bus */
+ xspi->write_fn(0xffff, regs_base + XSPI_SSR_OFFSET);
+ /* Disable the transmitter, enable Manual Slave Select Assertion,
+ * put SPI controller into master mode, and enable it */
+ xspi->write_fn(XSPI_CR_MANUAL_SSELECT | XSPI_CR_MASTER_MODE |
+ XSPI_CR_ENABLE | XSPI_CR_TXFIFO_RESET | XSPI_CR_RXFIFO_RESET,
+ regs_base + XSPI_CR_OFFSET);
+}
+
+static void xilinx_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ u16 cr;
+ u32 cs;
+
+ if (is_on == BITBANG_CS_INACTIVE) {
+ /* Deselect the slave on the SPI bus */
+ xspi->write_fn(xspi->cs_inactive, xspi->regs + XSPI_SSR_OFFSET);
+ return;
+ }
+
+ /* Set the SPI clock phase and polarity */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET) & ~XSPI_CR_MODE_MASK;
+ if (spi->mode & SPI_CPHA)
+ cr |= XSPI_CR_CPHA;
+ if (spi->mode & SPI_CPOL)
+ cr |= XSPI_CR_CPOL;
+ if (spi->mode & SPI_LSB_FIRST)
+ cr |= XSPI_CR_LSB_FIRST;
+ if (spi->mode & SPI_LOOP)
+ cr |= XSPI_CR_LOOP;
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+
+	/* We do not check spi->max_speed_hz here as the SPI clock
+	 * frequency is not software programmable (it is fixed by an
+	 * IP block design parameter)
+	 */
+
+ cs = xspi->cs_inactive;
+ cs ^= BIT(spi->chip_select);
+
+ /* Activate the chip select */
+ xspi->write_fn(cs, xspi->regs + XSPI_SSR_OFFSET);
+}
+
+/* spi_bitbang requires custom setup_transfer() to be defined if there is a
+ * custom txrx_bufs().
+ */
+static int xilinx_spi_setup_transfer(struct spi_device *spi,
+ struct spi_transfer *t)
+{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+
+ if (spi->mode & SPI_CS_HIGH)
+ xspi->cs_inactive &= ~BIT(spi->chip_select);
+ else
+ xspi->cs_inactive |= BIT(spi->chip_select);
+
+ return 0;
+}
+
+static int xilinx_spi_txrx_bufs(struct spi_device *spi, struct spi_transfer *t)
+{
+ struct xilinx_spi *xspi = spi_master_get_devdata(spi->master);
+ int remaining_words; /* the number of words left to transfer */
+ bool use_irq = false;
+ u16 cr = 0;
+
+ /* We get here with transmitter inhibited */
+
+ xspi->tx_ptr = t->tx_buf;
+ xspi->rx_ptr = t->rx_buf;
+ remaining_words = t->len / xspi->bytes_per_word;
+ reinit_completion(&xspi->done);
+
+ if (xspi->irq >= 0 && remaining_words > xspi->buffer_size) {
+ use_irq = true;
+ xspi->write_fn(XSPI_INTR_TX_EMPTY,
+ xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ /* Enable the global IPIF interrupt */
+ xspi->write_fn(XIPIF_V123B_GINTR_ENABLE,
+ xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+		/* Inhibit irq to avoid spurious irqs on tx_empty */
+ cr = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ xspi->regs + XSPI_CR_OFFSET);
+ }
+
+ while (remaining_words) {
+ int n_words, tx_words, rx_words;
+
+ n_words = min(remaining_words, xspi->buffer_size);
+
+ tx_words = n_words;
+ while (tx_words--)
+ xilinx_spi_tx(xspi);
+
+ /* Start the transfer by not inhibiting the transmitter any
+ * longer
+ */
+
+ if (use_irq) {
+ xspi->write_fn(cr, xspi->regs + XSPI_CR_OFFSET);
+ wait_for_completion(&xspi->done);
+ } else
+ while (!(xspi->read_fn(xspi->regs + XSPI_SR_OFFSET) &
+ XSPI_SR_TX_EMPTY_MASK))
+ ;
+
+ /* A transmit has just completed. Process received data and
+ * check for more data to transmit. Always inhibit the
+ * transmitter while the Isr refills the transmit register/FIFO,
+ * or make sure it is stopped if we're done.
+ */
+ if (use_irq)
+ xspi->write_fn(cr | XSPI_CR_TRANS_INHIBIT,
+ xspi->regs + XSPI_CR_OFFSET);
+
+ /* Read out all the data from the Rx FIFO */
+ rx_words = n_words;
+ while (rx_words--)
+ xilinx_spi_rx(xspi);
+
+ remaining_words -= n_words;
+ }
+
+ if (use_irq)
+ xspi->write_fn(0, xspi->regs + XIPIF_V123B_DGIER_OFFSET);
+
+ return t->len;
+}
+
+/* This driver supports single master mode only. Hence Tx FIFO Empty
+ * is the only interrupt we care about.
+ * Receive FIFO Overrun, Transmit FIFO Underrun, Mode Fault, and Slave Mode
+ * Fault should never occur.
+ */
+static irqreturn_t xilinx_spi_irq(int irq, void *dev_id)
+{
+ struct xilinx_spi *xspi = dev_id;
+ u32 ipif_isr;
+
+ /* Get the IPIF interrupts, and clear them immediately */
+ ipif_isr = xspi->read_fn(xspi->regs + XIPIF_V123B_IISR_OFFSET);
+ xspi->write_fn(ipif_isr, xspi->regs + XIPIF_V123B_IISR_OFFSET);
+
+ if (ipif_isr & XSPI_INTR_TX_EMPTY) { /* Transmission completed */
+ complete(&xspi->done);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int xilinx_spi_find_buffer_size(struct xilinx_spi *xspi)
+{
+ u8 sr;
+ int n_words = 0;
+
+ /*
+ * Before the buffer_size detection we reset the core
+ * to make sure we start with a clean state.
+ */
+ xspi->write_fn(XIPIF_V123B_RESET_MASK,
+ xspi->regs + XIPIF_V123B_RESETR_OFFSET);
+
+ /* Fill the Tx FIFO with as many words as possible */
+ do {
+ xspi->write_fn(0, xspi->regs + XSPI_TXD_OFFSET);
+ sr = xspi->read_fn(xspi->regs + XSPI_SR_OFFSET);
+ n_words++;
+ } while (!(sr & XSPI_SR_TX_FULL_MASK));
+
+ return n_words;
+}
+
+static const struct of_device_id xilinx_spi_of_match[] = {
+ { .compatible = "xlnx,xps-spi-2.00.a", },
+ { .compatible = "xlnx,xps-spi-2.00.b", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xilinx_spi_of_match);
+
+static int xilinx_spi_probe(struct platform_device *pdev)
+{
+ struct xilinx_spi *xspi;
+ struct xspi_platform_data *pdata;
+ struct resource *res;
+ int ret, num_cs = 0, bits_per_word = 8;
+ struct spi_master *master;
+ u32 tmp;
+ u8 i;
+
+ pdata = dev_get_platdata(&pdev->dev);
+ if (pdata) {
+ num_cs = pdata->num_chipselect;
+ bits_per_word = pdata->bits_per_word;
+ } else {
+ of_property_read_u32(pdev->dev.of_node, "xlnx,num-ss-bits",
+ &num_cs);
+ }
+
+ if (!num_cs) {
+ dev_err(&pdev->dev,
+ "Missing slave select configuration data\n");
+ return -EINVAL;
+ }
+
+ if (num_cs > XILINX_SPI_MAX_CS) {
+ dev_err(&pdev->dev, "Invalid number of spi slaves\n");
+ return -EINVAL;
+ }
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct xilinx_spi));
+ if (!master)
+ return -ENODEV;
+
+ /* the spi->mode bits understood by this driver: */
+ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_LSB_FIRST | SPI_LOOP |
+ SPI_CS_HIGH;
+
+ xspi = spi_master_get_devdata(master);
+ xspi->cs_inactive = 0xffffffff;
+ xspi->bitbang.master = master;
+ xspi->bitbang.chipselect = xilinx_spi_chipselect;
+ xspi->bitbang.setup_transfer = xilinx_spi_setup_transfer;
+ xspi->bitbang.txrx_bufs = xilinx_spi_txrx_bufs;
+ init_completion(&xspi->done);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ xspi->regs = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(xspi->regs)) {
+ ret = PTR_ERR(xspi->regs);
+ goto put_master;
+ }
+
+ master->bus_num = pdev->id;
+ master->num_chipselect = num_cs;
+ master->dev.of_node = pdev->dev.of_node;
+
+ /*
+ * Detect endianness of the IP via the loop bit in the CR. The
+ * detection must be done before reset is sent because an
+ * incorrect reset value generates an error interrupt.
+ * Set up the little-endian helper functions first, then write
+ * the loop bit and check whether it reads back correctly.
+ */
+ xspi->read_fn = xspi_read32;
+ xspi->write_fn = xspi_write32;
+
+ xspi->write_fn(XSPI_CR_LOOP, xspi->regs + XSPI_CR_OFFSET);
+ tmp = xspi->read_fn(xspi->regs + XSPI_CR_OFFSET);
+ tmp &= XSPI_CR_LOOP;
+ if (tmp != XSPI_CR_LOOP) {
+ xspi->read_fn = xspi_read32_be;
+ xspi->write_fn = xspi_write32_be;
+ }
+
+ master->bits_per_word_mask = SPI_BPW_MASK(bits_per_word);
+ xspi->bytes_per_word = bits_per_word / 8;
+ xspi->buffer_size = xilinx_spi_find_buffer_size(xspi);
+
+ xspi->irq = platform_get_irq(pdev, 0);
+ if (xspi->irq >= 0) {
+ /* Register for SPI Interrupt */
+ ret = devm_request_irq(&pdev->dev, xspi->irq, xilinx_spi_irq, 0,
+ dev_name(&pdev->dev), xspi);
+ if (ret)
+ goto put_master;
+ }
+
+ /* SPI controller initializations */
+ xspi_init_hw(xspi);
+
+ ret = spi_bitbang_start(&xspi->bitbang);
+ if (ret) {
+ dev_err(&pdev->dev, "spi_bitbang_start FAILED\n");
+ goto put_master;
+ }
+
+ dev_info(&pdev->dev, "at 0x%08llX mapped to 0x%p, irq=%d\n",
+ (unsigned long long)res->start, xspi->regs, xspi->irq);
+
+ if (pdata) {
+ for (i = 0; i < pdata->num_devices; i++)
+ spi_new_device(master, pdata->devices + i);
+ }
+
+ platform_set_drvdata(pdev, master);
+ return 0;
+
+put_master:
+ spi_master_put(master);
+
+ return ret;
+}
+
+static int xilinx_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct xilinx_spi *xspi = spi_master_get_devdata(master);
+ void __iomem *regs_base = xspi->regs;
+
+ spi_bitbang_stop(&xspi->bitbang);
+
+ /* Disable all the interrupts just in case */
+ xspi->write_fn(0, regs_base + XIPIF_V123B_IIER_OFFSET);
+ /* Disable the global IPIF interrupt */
+ xspi->write_fn(0, regs_base + XIPIF_V123B_DGIER_OFFSET);
+
+ spi_master_put(xspi->bitbang.master);
+
+ return 0;
+}
+
+/* work with hotplug and coldplug */
+MODULE_ALIAS("platform:" XILINX_SPI_NAME);
+
+static struct platform_driver xilinx_spi_driver = {
+ .probe = xilinx_spi_probe,
+ .remove = xilinx_spi_remove,
+ .driver = {
+ .name = XILINX_SPI_NAME,
+ .of_match_table = xilinx_spi_of_match,
+ },
+};
+module_platform_driver(xilinx_spi_driver);
+
+MODULE_AUTHOR("MontaVista Software, Inc. <source@mvista.com>");
+MODULE_DESCRIPTION("Xilinx SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi-xtensa-xtfpga.c b/drivers/spi/spi-xtensa-xtfpga.c
new file mode 100644
index 000000000..2e32ea2f1
--- /dev/null
+++ b/drivers/spi/spi-xtensa-xtfpga.c
@@ -0,0 +1,170 @@
+/*
+ * Xtensa xtfpga SPI controller driver
+ *
+ * Copyright (c) 2014 Cadence Design Systems Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi_bitbang.h>
+
+#define XTFPGA_SPI_NAME "xtfpga_spi"
+
+#define XTFPGA_SPI_START 0x0
+#define XTFPGA_SPI_BUSY 0x4
+#define XTFPGA_SPI_DATA 0x8
+
+#define BUSY_WAIT_US 100
+
+struct xtfpga_spi {
+ struct spi_bitbang bitbang;
+ void __iomem *regs;
+ u32 data;
+ unsigned data_sz;
+};
+
+static inline void xtfpga_spi_write32(const struct xtfpga_spi *spi,
+ unsigned addr, u32 val)
+{
+ iowrite32(val, spi->regs + addr);
+}
+
+static inline unsigned int xtfpga_spi_read32(const struct xtfpga_spi *spi,
+ unsigned addr)
+{
+ return ioread32(spi->regs + addr);
+}
+
+static inline void xtfpga_spi_wait_busy(struct xtfpga_spi *xspi)
+{
+ unsigned i;
+
+ for (i = 0; xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY) &&
+ i < BUSY_WAIT_US; ++i)
+ udelay(1);
+ WARN_ON_ONCE(i == BUSY_WAIT_US);
+}
+
+static u32 xtfpga_spi_txrx_word(struct spi_device *spi, unsigned nsecs,
+ u32 v, u8 bits)
+{
+ struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+
+ xspi->data = (xspi->data << bits) | (v & GENMASK(bits - 1, 0));
+ xspi->data_sz += bits;
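+ /*
+ * Accumulate bits until a full 16-bit half-word is available;
+ * e.g. two 8-bit words end up combined into the single register
+ * write below.
+ */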
+ if (xspi->data_sz >= 16) {
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_DATA,
+ xspi->data >> (xspi->data_sz - 16));
+ xspi->data_sz -= 16;
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 1);
+ xtfpga_spi_wait_busy(xspi);
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
+ }
+
+ return 0;
+}
+
+static void xtfpga_spi_chipselect(struct spi_device *spi, int is_on)
+{
+ struct xtfpga_spi *xspi = spi_master_get_devdata(spi->master);
+
+ WARN_ON(xspi->data_sz != 0);
+ xspi->data_sz = 0;
+}
+
+static int xtfpga_spi_probe(struct platform_device *pdev)
+{
+ struct xtfpga_spi *xspi;
+ struct resource *mem;
+ int ret;
+ struct spi_master *master;
+
+ master = spi_alloc_master(&pdev->dev, sizeof(struct xtfpga_spi));
+ if (!master)
+ return -ENOMEM;
+
+ master->flags = SPI_MASTER_NO_RX;
+ master->bits_per_word_mask = SPI_BPW_RANGE_MASK(1, 16);
+ master->bus_num = pdev->dev.id;
+ master->dev.of_node = pdev->dev.of_node;
+
+ xspi = spi_master_get_devdata(master);
+ xspi->bitbang.master = master;
+ xspi->bitbang.chipselect = xtfpga_spi_chipselect;
+ xspi->bitbang.txrx_word[SPI_MODE_0] = xtfpga_spi_txrx_word;
+
+ mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ if (!mem) {
+ dev_err(&pdev->dev, "No memory resource\n");
+ ret = -ENODEV;
+ goto err;
+ }
+ xspi->regs = devm_ioremap_resource(&pdev->dev, mem);
+ if (IS_ERR(xspi->regs)) {
+ ret = PTR_ERR(xspi->regs);
+ goto err;
+ }
+
+ xtfpga_spi_write32(xspi, XTFPGA_SPI_START, 0);
+ usleep_range(1000, 2000);
+ if (xtfpga_spi_read32(xspi, XTFPGA_SPI_BUSY)) {
+ dev_err(&pdev->dev, "Device stuck in busy state\n");
+ ret = -EBUSY;
+ goto err;
+ }
+
+ ret = spi_bitbang_start(&xspi->bitbang);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "spi_bitbang_start failed\n");
+ goto err;
+ }
+
+ platform_set_drvdata(pdev, master);
+ return 0;
+err:
+ spi_master_put(master);
+ return ret;
+}
+
+static int xtfpga_spi_remove(struct platform_device *pdev)
+{
+ struct spi_master *master = platform_get_drvdata(pdev);
+ struct xtfpga_spi *xspi = spi_master_get_devdata(master);
+
+ spi_bitbang_stop(&xspi->bitbang);
+ spi_master_put(master);
+
+ return 0;
+}
+
+MODULE_ALIAS("platform:" XTFPGA_SPI_NAME);
+
+#ifdef CONFIG_OF
+static const struct of_device_id xtfpga_spi_of_match[] = {
+ { .compatible = "cdns,xtfpga-spi", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, xtfpga_spi_of_match);
+#endif
+
+static struct platform_driver xtfpga_spi_driver = {
+ .probe = xtfpga_spi_probe,
+ .remove = xtfpga_spi_remove,
+ .driver = {
+ .name = XTFPGA_SPI_NAME,
+ .of_match_table = of_match_ptr(xtfpga_spi_of_match),
+ },
+};
+module_platform_driver(xtfpga_spi_driver);
+
+MODULE_AUTHOR("Max Filippov <jcmvbkbc@gmail.com>");
+MODULE_DESCRIPTION("xtensa xtfpga SPI driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
new file mode 100644
index 000000000..d35c1a132
--- /dev/null
+++ b/drivers/spi/spi.c
@@ -0,0 +1,2408 @@
+/*
+ * SPI init/core code
+ *
+ * Copyright (C) 2005 David Brownell
+ * Copyright (C) 2008 Secret Lab Technologies Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmaengine.h>
+#include <linux/mutex.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/slab.h>
+#include <linux/mod_devicetable.h>
+#include <linux/spi/spi.h>
+#include <linux/of_gpio.h>
+#include <linux/pm_runtime.h>
+#include <linux/pm_domain.h>
+#include <linux/export.h>
+#include <linux/sched/rt.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/ioport.h>
+#include <linux/acpi.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/spi.h>
+
+static void spidev_release(struct device *dev)
+{
+ struct spi_device *spi = to_spi_device(dev);
+
+ /* spi masters may cleanup for released devices */
+ if (spi->master->cleanup)
+ spi->master->cleanup(spi);
+
+ spi_master_put(spi->master);
+ kfree(spi);
+}
+
+static ssize_t
+modalias_show(struct device *dev, struct device_attribute *a, char *buf)
+{
+ const struct spi_device *spi = to_spi_device(dev);
+ int len;
+
+ len = acpi_device_modalias(dev, buf, PAGE_SIZE - 1);
+ if (len != -ENODEV)
+ return len;
+
+ return sprintf(buf, "%s%s\n", SPI_MODULE_PREFIX, spi->modalias);
+}
+static DEVICE_ATTR_RO(modalias);
+
+static struct attribute *spi_dev_attrs[] = {
+ &dev_attr_modalias.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(spi_dev);
+
+/* modalias support makes "modprobe $MODALIAS" new-style hotplug work,
+ * and the sysfs version makes coldplug work too.
+ */
+
+static const struct spi_device_id *spi_match_id(const struct spi_device_id *id,
+ const struct spi_device *sdev)
+{
+ while (id->name[0]) {
+ if (!strcmp(sdev->modalias, id->name))
+ return id;
+ id++;
+ }
+ return NULL;
+}
+
+const struct spi_device_id *spi_get_device_id(const struct spi_device *sdev)
+{
+ const struct spi_driver *sdrv = to_spi_driver(sdev->dev.driver);
+
+ return spi_match_id(sdrv->id_table, sdev);
+}
+EXPORT_SYMBOL_GPL(spi_get_device_id);
+
+static int spi_match_device(struct device *dev, struct device_driver *drv)
+{
+ const struct spi_device *spi = to_spi_device(dev);
+ const struct spi_driver *sdrv = to_spi_driver(drv);
+
+ /* Attempt an OF style match */
+ if (of_driver_match_device(dev, drv))
+ return 1;
+
+ /* Then try ACPI */
+ if (acpi_driver_match_device(dev, drv))
+ return 1;
+
+ if (sdrv->id_table)
+ return !!spi_match_id(sdrv->id_table, spi);
+
+ return strcmp(spi->modalias, drv->name) == 0;
+}
+
+static int spi_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ const struct spi_device *spi = to_spi_device(dev);
+ int rc;
+
+ rc = acpi_device_uevent_modalias(dev, env);
+ if (rc != -ENODEV)
+ return rc;
+
+ add_uevent_var(env, "MODALIAS=%s%s", SPI_MODULE_PREFIX, spi->modalias);
+ return 0;
+}
+
+struct bus_type spi_bus_type = {
+ .name = "spi",
+ .dev_groups = spi_dev_groups,
+ .match = spi_match_device,
+ .uevent = spi_uevent,
+};
+EXPORT_SYMBOL_GPL(spi_bus_type);
+
+
+static int spi_drv_probe(struct device *dev)
+{
+ const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ int ret;
+
+ ret = of_clk_set_defaults(dev->of_node, false);
+ if (ret)
+ return ret;
+
+ ret = dev_pm_domain_attach(dev, true);
+ if (ret != -EPROBE_DEFER) {
+ ret = sdrv->probe(to_spi_device(dev));
+ if (ret)
+ dev_pm_domain_detach(dev, true);
+ }
+
+ return ret;
+}
+
+static int spi_drv_remove(struct device *dev)
+{
+ const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+ int ret;
+
+ ret = sdrv->remove(to_spi_device(dev));
+ dev_pm_domain_detach(dev, true);
+
+ return ret;
+}
+
+static void spi_drv_shutdown(struct device *dev)
+{
+ const struct spi_driver *sdrv = to_spi_driver(dev->driver);
+
+ sdrv->shutdown(to_spi_device(dev));
+}
+
+/**
+ * spi_register_driver - register a SPI driver
+ * @sdrv: the driver to register
+ * Context: can sleep
+ */
+int spi_register_driver(struct spi_driver *sdrv)
+{
+ sdrv->driver.bus = &spi_bus_type;
+ if (sdrv->probe)
+ sdrv->driver.probe = spi_drv_probe;
+ if (sdrv->remove)
+ sdrv->driver.remove = spi_drv_remove;
+ if (sdrv->shutdown)
+ sdrv->driver.shutdown = spi_drv_shutdown;
+ return driver_register(&sdrv->driver);
+}
+EXPORT_SYMBOL_GPL(spi_register_driver);
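+
+/*
+ * Usage sketch (illustrative only; all "foo" names are hypothetical):
+ * a minimal protocol driver registers itself via the
+ * module_spi_driver() helper, which wraps spi_register_driver():
+ *
+ *	static struct spi_driver foo_driver = {
+ *		.driver = { .name = "foo" },
+ *		.probe = foo_probe,
+ *		.remove = foo_remove,
+ *	};
+ *	module_spi_driver(foo_driver);
+ */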
+
+/*-------------------------------------------------------------------------*/
+
+/* SPI devices should normally not be created by SPI device drivers; that
+ * would make them board-specific. Similarly with SPI master drivers.
+ * Device registration normally goes into a board file like
+ * arch/.../mach.../board-YYY.c, together with other read-only (flashable)
+ * information about mainboard devices.
+ */
+
+struct boardinfo {
+ struct list_head list;
+ struct spi_board_info board_info;
+};
+
+static LIST_HEAD(board_list);
+static LIST_HEAD(spi_master_list);
+
+/*
+ * Used to protect add/del operations on the board_info list and
+ * the spi_master list, and their matching process.
+ */
+static DEFINE_MUTEX(board_lock);
+
+/**
+ * spi_alloc_device - Allocate a new SPI device
+ * @master: Controller to which device is connected
+ * Context: can sleep
+ *
+ * Allows a driver to allocate and initialize a spi_device without
+ * registering it immediately. This allows a driver to directly
+ * fill the spi_device with device parameters before calling
+ * spi_add_device() on it.
+ *
+ * Caller is responsible to call spi_add_device() on the returned
+ * spi_device structure to add it to the SPI master. If the caller
+ * needs to discard the spi_device without adding it, then it should
+ * call spi_dev_put() on it.
+ *
+ * Returns a pointer to the new device, or NULL.
+ */
+struct spi_device *spi_alloc_device(struct spi_master *master)
+{
+ struct spi_device *spi;
+
+ if (!spi_master_get(master))
+ return NULL;
+
+ spi = kzalloc(sizeof(*spi), GFP_KERNEL);
+ if (!spi) {
+ spi_master_put(master);
+ return NULL;
+ }
+
+ spi->master = master;
+ spi->dev.parent = &master->dev;
+ spi->dev.bus = &spi_bus_type;
+ spi->dev.release = spidev_release;
+ spi->cs_gpio = -ENOENT;
+ device_initialize(&spi->dev);
+ return spi;
+}
+EXPORT_SYMBOL_GPL(spi_alloc_device);
+
+static void spi_dev_set_name(struct spi_device *spi)
+{
+ struct acpi_device *adev = ACPI_COMPANION(&spi->dev);
+
+ if (adev) {
+ dev_set_name(&spi->dev, "spi-%s", acpi_dev_name(adev));
+ return;
+ }
+
+ dev_set_name(&spi->dev, "%s.%u", dev_name(&spi->master->dev),
+ spi->chip_select);
+}
+
+static int spi_dev_check(struct device *dev, void *data)
+{
+ struct spi_device *spi = to_spi_device(dev);
+ struct spi_device *new_spi = data;
+
+ if (spi->master == new_spi->master &&
+ spi->chip_select == new_spi->chip_select)
+ return -EBUSY;
+ return 0;
+}
+
+/**
+ * spi_add_device - Add spi_device allocated with spi_alloc_device
+ * @spi: spi_device to register
+ *
+ * Companion function to spi_alloc_device. Devices allocated with
+ * spi_alloc_device can be added onto the spi bus with this function.
+ *
+ * Returns 0 on success; negative errno on failure
+ */
+int spi_add_device(struct spi_device *spi)
+{
+ static DEFINE_MUTEX(spi_add_lock);
+ struct spi_master *master = spi->master;
+ struct device *dev = master->dev.parent;
+ int status;
+
+ /* Chipselects are numbered 0..max; validate. */
+ if (spi->chip_select >= master->num_chipselect) {
+ dev_err(dev, "cs%d >= max %d\n",
+ spi->chip_select,
+ master->num_chipselect);
+ return -EINVAL;
+ }
+
+ /* Set the bus ID string */
+ spi_dev_set_name(spi);
+
+ /* We need to make sure there's no other device with this
+ * chipselect **BEFORE** we call setup(), else we'll trash
+ * its configuration. Lock against concurrent add() calls.
+ */
+ mutex_lock(&spi_add_lock);
+
+ status = bus_for_each_dev(&spi_bus_type, NULL, spi, spi_dev_check);
+ if (status) {
+ dev_err(dev, "chipselect %d already in use\n",
+ spi->chip_select);
+ goto done;
+ }
+
+ if (master->cs_gpios)
+ spi->cs_gpio = master->cs_gpios[spi->chip_select];
+
+ /* Drivers may modify this initial i/o setup, but will
+ * normally rely on the device being setup. Devices
+ * using SPI_CS_HIGH can't coexist well otherwise...
+ */
+ status = spi_setup(spi);
+ if (status < 0) {
+ dev_err(dev, "can't setup %s, status %d\n",
+ dev_name(&spi->dev), status);
+ goto done;
+ }
+
+ /* Device may be bound to an active driver when this returns */
+ status = device_add(&spi->dev);
+ if (status < 0)
+ dev_err(dev, "can't add %s, status %d\n",
+ dev_name(&spi->dev), status);
+ else
+ dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
+
+done:
+ mutex_unlock(&spi_add_lock);
+ return status;
+}
+EXPORT_SYMBOL_GPL(spi_add_device);
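+
+/*
+ * Usage sketch for the alloc/add pair above (illustrative only; the
+ * chip parameters are made up):
+ *
+ *	struct spi_device *spi = spi_alloc_device(master);
+ *
+ *	if (!spi)
+ *		return -ENOMEM;
+ *	spi->chip_select = 0;
+ *	spi->max_speed_hz = 1000000;
+ *	strlcpy(spi->modalias, "foo", sizeof(spi->modalias));
+ *	if (spi_add_device(spi))
+ *		spi_dev_put(spi);
+ */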
+
+/**
+ * spi_new_device - instantiate one new SPI device
+ * @master: Controller to which device is connected
+ * @chip: Describes the SPI device
+ * Context: can sleep
+ *
+ * On typical mainboards, this is purely internal, and it's not needed
+ * after board init creates the hard-wired devices. Some development
+ * platforms may not be able to use spi_register_board_info though, and
+ * this is exported so that for example a USB or parport based adapter
+ * driver could add devices (which it would learn about out-of-band).
+ *
+ * Returns the new device, or NULL.
+ */
+struct spi_device *spi_new_device(struct spi_master *master,
+ struct spi_board_info *chip)
+{
+ struct spi_device *proxy;
+ int status;
+
+ /* NOTE: caller did any chip->bus_num checks necessary.
+ *
+ * Also, unless we change the return value convention to use
+ * error-or-pointer (not NULL-or-pointer), troubleshootability
+ * suggests syslogged diagnostics are best here (ugh).
+ */
+
+ proxy = spi_alloc_device(master);
+ if (!proxy)
+ return NULL;
+
+ WARN_ON(strlen(chip->modalias) >= sizeof(proxy->modalias));
+
+ proxy->chip_select = chip->chip_select;
+ proxy->max_speed_hz = chip->max_speed_hz;
+ proxy->mode = chip->mode;
+ proxy->irq = chip->irq;
+ strlcpy(proxy->modalias, chip->modalias, sizeof(proxy->modalias));
+ proxy->dev.platform_data = (void *) chip->platform_data;
+ proxy->controller_data = chip->controller_data;
+ proxy->controller_state = NULL;
+
+ status = spi_add_device(proxy);
+ if (status < 0) {
+ spi_dev_put(proxy);
+ return NULL;
+ }
+
+ return proxy;
+}
+EXPORT_SYMBOL_GPL(spi_new_device);
+
+static void spi_match_master_to_boardinfo(struct spi_master *master,
+ struct spi_board_info *bi)
+{
+ struct spi_device *dev;
+
+ if (master->bus_num != bi->bus_num)
+ return;
+
+ dev = spi_new_device(master, bi);
+ if (!dev)
+ dev_err(master->dev.parent, "can't create new device for %s\n",
+ bi->modalias);
+}
+
+/**
+ * spi_register_board_info - register SPI devices for a given board
+ * @info: array of chip descriptors
+ * @n: how many descriptors are provided
+ * Context: can sleep
+ *
+ * Board-specific early init code calls this (probably during arch_initcall)
+ * with segments of the SPI device table. Any device nodes are created later,
+ * after the relevant parent SPI controller (bus_num) is defined. We keep
+ * this table of devices forever, so that reloading a controller driver will
+ * not make Linux forget about these hard-wired devices.
+ *
+ * Other code can also call this, e.g. a particular add-on board might provide
+ * SPI devices through its expansion connector, so code initializing that board
+ * would naturally declare its SPI devices.
+ *
+ * The board info passed can safely be __initdata ... but be careful of
+ * any embedded pointers (platform_data, etc), they're copied as-is.
+ */
+int spi_register_board_info(struct spi_board_info const *info, unsigned n)
+{
+ struct boardinfo *bi;
+ int i;
+
+ if (!n)
+ return -EINVAL;
+
+ bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
+ if (!bi)
+ return -ENOMEM;
+
+ for (i = 0; i < n; i++, bi++, info++) {
+ struct spi_master *master;
+
+ memcpy(&bi->board_info, info, sizeof(*info));
+ mutex_lock(&board_lock);
+ list_add_tail(&bi->list, &board_list);
+ list_for_each_entry(master, &spi_master_list, list)
+ spi_match_master_to_boardinfo(master, &bi->board_info);
+ mutex_unlock(&board_lock);
+ }
+
+ return 0;
+}
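+
+/*
+ * Declaration sketch (illustrative; the "foo" chip is hypothetical):
+ * board init code usually provides a static table and registers it
+ * once around arch_initcall time:
+ *
+ *	static struct spi_board_info foo_board_info[] __initdata = {
+ *		{
+ *			.modalias = "foo",
+ *			.max_speed_hz = 1000000,
+ *			.bus_num = 0,
+ *			.chip_select = 1,
+ *		},
+ *	};
+ *	spi_register_board_info(foo_board_info, ARRAY_SIZE(foo_board_info));
+ */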
+
+/*-------------------------------------------------------------------------*/
+
+static void spi_set_cs(struct spi_device *spi, bool enable)
+{
+ if (spi->mode & SPI_CS_HIGH)
+ enable = !enable;
+
+ if (spi->cs_gpio >= 0)
+ gpio_set_value(spi->cs_gpio, !enable);
+ else if (spi->master->set_cs)
+ spi->master->set_cs(spi, !enable);
+}
+
+#ifdef CONFIG_HAS_DMA
+static int spi_map_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, void *buf, size_t len,
+ enum dma_data_direction dir)
+{
+ const bool vmalloced_buf = is_vmalloc_addr(buf);
+ const int desc_len = vmalloced_buf ? PAGE_SIZE : master->max_dma_len;
+ const int sgs = DIV_ROUND_UP(len, desc_len);
+ struct page *vm_page;
+ void *sg_buf;
+ size_t min;
+ int i, ret;
+
+ ret = sg_alloc_table(sgt, sgs, GFP_KERNEL);
+ if (ret != 0)
+ return ret;
+
+ for (i = 0; i < sgs; i++) {
+ min = min_t(size_t, len, desc_len);
+
+ if (vmalloced_buf) {
+ vm_page = vmalloc_to_page(buf);
+ if (!vm_page) {
+ sg_free_table(sgt);
+ return -ENOMEM;
+ }
+ sg_set_page(&sgt->sgl[i], vm_page,
+ min, offset_in_page(buf));
+ } else {
+ sg_buf = buf;
+ sg_set_buf(&sgt->sgl[i], sg_buf, min);
+ }
+
+ buf += min;
+ len -= min;
+ }
+
+ ret = dma_map_sg(dev, sgt->sgl, sgt->nents, dir);
+ if (!ret)
+ ret = -ENOMEM;
+ if (ret < 0) {
+ sg_free_table(sgt);
+ return ret;
+ }
+
+ sgt->nents = ret;
+
+ return 0;
+}
+
+static void spi_unmap_buf(struct spi_master *master, struct device *dev,
+ struct sg_table *sgt, enum dma_data_direction dir)
+{
+ if (sgt->orig_nents) {
+ dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);
+ sg_free_table(sgt);
+ }
+}
+
+static int __spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct device *tx_dev, *rx_dev;
+ struct spi_transfer *xfer;
+ int ret;
+
+ if (!master->can_dma)
+ return 0;
+
+ tx_dev = master->dma_tx->device->dev;
+ rx_dev = master->dma_rx->device->dev;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ if (xfer->tx_buf != NULL) {
+ ret = spi_map_buf(master, tx_dev, &xfer->tx_sg,
+ (void *)xfer->tx_buf, xfer->len,
+ DMA_TO_DEVICE);
+ if (ret != 0)
+ return ret;
+ }
+
+ if (xfer->rx_buf != NULL) {
+ ret = spi_map_buf(master, rx_dev, &xfer->rx_sg,
+ xfer->rx_buf, xfer->len,
+ DMA_FROM_DEVICE);
+ if (ret != 0) {
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg,
+ DMA_TO_DEVICE);
+ return ret;
+ }
+ }
+ }
+
+ master->cur_msg_mapped = true;
+
+ return 0;
+}
+
+static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ struct device *tx_dev, *rx_dev;
+
+ if (!master->cur_msg_mapped || !master->can_dma)
+ return 0;
+
+ tx_dev = master->dma_tx->device->dev;
+ rx_dev = master->dma_rx->device->dev;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ /*
+ * Restore tx_buf and rx_buf to NULL where they were replaced
+ * by the dummy buffers (i.e. where they were originally NULL).
+ */
+ if (xfer->tx_buf == master->dummy_tx)
+ xfer->tx_buf = NULL;
+ if (xfer->rx_buf == master->dummy_rx)
+ xfer->rx_buf = NULL;
+
+ if (!master->can_dma(master, msg->spi, xfer))
+ continue;
+
+ spi_unmap_buf(master, rx_dev, &xfer->rx_sg, DMA_FROM_DEVICE);
+ spi_unmap_buf(master, tx_dev, &xfer->tx_sg, DMA_TO_DEVICE);
+ }
+
+ return 0;
+}
+#else /* !CONFIG_HAS_DMA */
+static inline int __spi_map_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ return 0;
+}
+
+static inline int spi_unmap_msg(struct spi_master *master,
+ struct spi_message *msg)
+{
+ return 0;
+}
+#endif /* !CONFIG_HAS_DMA */
+
+static int spi_map_msg(struct spi_master *master, struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ void *tmp;
+ unsigned int max_tx, max_rx;
+
+ if (master->flags & (SPI_MASTER_MUST_RX | SPI_MASTER_MUST_TX)) {
+ max_tx = 0;
+ max_rx = 0;
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ if ((master->flags & SPI_MASTER_MUST_TX) &&
+ !xfer->tx_buf)
+ max_tx = max(xfer->len, max_tx);
+ if ((master->flags & SPI_MASTER_MUST_RX) &&
+ !xfer->rx_buf)
+ max_rx = max(xfer->len, max_rx);
+ }
+
+ if (max_tx) {
+ tmp = krealloc(master->dummy_tx, max_tx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_tx = tmp;
+ memset(tmp, 0, max_tx);
+ }
+
+ if (max_rx) {
+ tmp = krealloc(master->dummy_rx, max_rx,
+ GFP_KERNEL | GFP_DMA);
+ if (!tmp)
+ return -ENOMEM;
+ master->dummy_rx = tmp;
+ }
+
+ if (max_tx || max_rx) {
+ list_for_each_entry(xfer, &msg->transfers,
+ transfer_list) {
+ if (!xfer->tx_buf)
+ xfer->tx_buf = master->dummy_tx;
+ if (!xfer->rx_buf)
+ xfer->rx_buf = master->dummy_rx;
+ }
+ }
+ }
+
+ return __spi_map_msg(master, msg);
+}
+
+/*
+ * spi_transfer_one_message - Default implementation of transfer_one_message()
+ *
+ * This is a standard implementation of transfer_one_message() for
+ * drivers which implement a transfer_one() operation. It provides
+ * standard handling of delays and chip select management.
+ */
+static int spi_transfer_one_message(struct spi_master *master,
+ struct spi_message *msg)
+{
+ struct spi_transfer *xfer;
+ bool keep_cs = false;
+ int ret = 0;
+ unsigned long ms = 1;
+
+ spi_set_cs(msg->spi, true);
+
+ list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+ trace_spi_transfer_start(msg, xfer);
+
+ if (xfer->tx_buf || xfer->rx_buf) {
+ reinit_completion(&master->xfer_completion);
+
+ ret = master->transfer_one(master, msg->spi, xfer);
+ if (ret < 0) {
+ dev_err(&msg->spi->dev,
+ "SPI transfer failed: %d\n", ret);
+ goto out;
+ }
+
+ if (ret > 0) {
+ ret = 0;
+ ms = xfer->len * 8 * 1000 / xfer->speed_hz;
+ ms += ms + 100; /* some tolerance */
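+ /*
+ * e.g. a 256-byte transfer at 1 MHz gives a nominal
+ * 2 ms, and hence a 2 * 2 + 100 = 104 ms timeout.
+ */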
+
+ ms = wait_for_completion_timeout(&master->xfer_completion,
+ msecs_to_jiffies(ms));
+ }
+
+ if (ms == 0) {
+ dev_err(&msg->spi->dev,
+ "SPI transfer timed out\n");
+ msg->status = -ETIMEDOUT;
+ }
+ } else {
+ if (xfer->len)
+ dev_err(&msg->spi->dev,
+ "Bufferless transfer has length %u\n",
+ xfer->len);
+ }
+
+ trace_spi_transfer_stop(msg, xfer);
+
+ if (msg->status != -EINPROGRESS)
+ goto out;
+
+ if (xfer->delay_usecs)
+ udelay(xfer->delay_usecs);
+
+ if (xfer->cs_change) {
+ if (list_is_last(&xfer->transfer_list,
+ &msg->transfers)) {
+ keep_cs = true;
+ } else {
+ spi_set_cs(msg->spi, false);
+ udelay(10);
+ spi_set_cs(msg->spi, true);
+ }
+ }
+
+ msg->actual_length += xfer->len;
+ }
+
+out:
+ if (ret != 0 || !keep_cs)
+ spi_set_cs(msg->spi, false);
+
+ if (msg->status == -EINPROGRESS)
+ msg->status = ret;
+
+ if (msg->status && master->handle_err)
+ master->handle_err(master, msg);
+
+ spi_finalize_current_message(master);
+
+ return ret;
+}
+
+/**
+ * spi_finalize_current_transfer - report completion of a transfer
+ * @master: the master reporting completion
+ *
+ * Called by SPI drivers using the core transfer_one_message()
+ * implementation to notify it that the current interrupt driven
+ * transfer has finished and the next one may be scheduled.
+ */
+void spi_finalize_current_transfer(struct spi_master *master)
+{
+ complete(&master->xfer_completion);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_transfer);
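+
+/*
+ * Driver-side sketch (illustrative only; the "foo" names are
+ * hypothetical): a transfer_one() implementation kicks off the hardware
+ * and returns a positive value, and its interrupt handler later reports
+ * completion so the core can schedule the next transfer:
+ *
+ *	static int foo_transfer_one(struct spi_master *master,
+ *				    struct spi_device *spi,
+ *				    struct spi_transfer *xfer)
+ *	{
+ *		foo_start_hw(master, xfer);
+ *		return 1;	(the core then waits on xfer_completion)
+ *	}
+ *
+ *	static irqreturn_t foo_irq(int irq, void *dev_id)
+ *	{
+ *		spi_finalize_current_transfer(dev_id);
+ *		return IRQ_HANDLED;
+ *	}
+ */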
+
+/**
+ * __spi_pump_messages - function which processes spi message queue
+ * @master: master to process queue for
+ * @in_kthread: true if we are in the context of the message pump thread
+ *
+ * This function checks if there is any spi message in the queue that
+ * needs processing and if so calls out to the driver to initialize the
+ * hardware and transfer each message.
+ *
+ * Note that it is called both from the kthread itself and also from
+ * inside spi_sync(); the queue extraction handling at the top of the
+ * function should deal with this safely.
+ */
+static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
+{
+ unsigned long flags;
+ bool was_busy = false;
+ int ret;
+
+ /* Lock queue */
+ spin_lock_irqsave(&master->queue_lock, flags);
+
+ /* Make sure we are not already running a message */
+ if (master->cur_msg) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+
+ /* If another context is idling the device then defer */
+ if (master->idling) {
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+
+ /* Check if the queue is idle */
+ if (list_empty(&master->queue) || !master->running) {
+ if (!master->busy) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+
+ /* Only do teardown in the thread */
+ if (!in_kthread) {
+ queue_kthread_work(&master->kworker,
+ &master->pump_messages);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+
+ master->busy = false;
+ master->idling = true;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ kfree(master->dummy_rx);
+ master->dummy_rx = NULL;
+ kfree(master->dummy_tx);
+ master->dummy_tx = NULL;
+ if (master->unprepare_transfer_hardware &&
+ master->unprepare_transfer_hardware(master))
+ dev_err(&master->dev,
+ "failed to unprepare transfer hardware\n");
+ if (master->auto_runtime_pm) {
+ pm_runtime_mark_last_busy(master->dev.parent);
+ pm_runtime_put_autosuspend(master->dev.parent);
+ }
+ trace_spi_master_idle(master);
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+ master->idling = false;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return;
+ }
+
+ /* Extract head of queue */
+ master->cur_msg =
+ list_first_entry(&master->queue, struct spi_message, queue);
+
+ list_del_init(&master->cur_msg->queue);
+ if (master->busy)
+ was_busy = true;
+ else
+ master->busy = true;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ if (!was_busy && master->auto_runtime_pm) {
+ ret = pm_runtime_get_sync(master->dev.parent);
+ if (ret < 0) {
+ dev_err(&master->dev, "Failed to power device: %d\n",
+ ret);
+ return;
+ }
+ }
+
+ if (!was_busy)
+ trace_spi_master_busy(master);
+
+ if (!was_busy && master->prepare_transfer_hardware) {
+ ret = master->prepare_transfer_hardware(master);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to prepare transfer hardware\n");
+
+ if (master->auto_runtime_pm)
+ pm_runtime_put(master->dev.parent);
+ return;
+ }
+ }
+
+ trace_spi_message_start(master->cur_msg);
+
+ if (master->prepare_message) {
+ ret = master->prepare_message(master, master->cur_msg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to prepare message: %d\n", ret);
+ master->cur_msg->status = ret;
+ spi_finalize_current_message(master);
+ return;
+ }
+ master->cur_msg_prepared = true;
+ }
+
+ ret = spi_map_msg(master, master->cur_msg);
+ if (ret) {
+ master->cur_msg->status = ret;
+ spi_finalize_current_message(master);
+ return;
+ }
+
+ ret = master->transfer_one_message(master, master->cur_msg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to transfer one message from queue\n");
+ return;
+ }
+}
+
+/**
+ * spi_pump_messages - kthread work function which processes spi message queue
+ * @work: pointer to kthread work struct contained in the master struct
+ */
+static void spi_pump_messages(struct kthread_work *work)
+{
+ struct spi_master *master =
+ container_of(work, struct spi_master, pump_messages);
+
+ __spi_pump_messages(master, true);
+}
+
+static int spi_init_queue(struct spi_master *master)
+{
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
+
+ master->running = false;
+ master->busy = false;
+
+ init_kthread_worker(&master->kworker);
+ master->kworker_task = kthread_run(kthread_worker_fn,
+ &master->kworker, "%s",
+ dev_name(&master->dev));
+ if (IS_ERR(master->kworker_task)) {
+ dev_err(&master->dev, "failed to create message pump task\n");
+ return PTR_ERR(master->kworker_task);
+ }
+ init_kthread_work(&master->pump_messages, spi_pump_messages);
+
+ /*
+ * Master config will indicate if this controller should run the
+ * message pump with high (realtime) priority to reduce the transfer
+ * latency on the bus by minimising the delay between a transfer
+ * request and the scheduling of the message pump thread. Without this
+ * setting the message pump thread will remain at default priority.
+ */
+ if (master->rt) {
+ dev_info(&master->dev,
+ "will run message pump with realtime priority\n");
+ sched_setscheduler(master->kworker_task, SCHED_FIFO, &param);
+ }
+
+ return 0;
+}
+
+/**
+ * spi_get_next_queued_message() - called by driver to check for queued
+ * messages
+ * @master: the master to check for queued messages
+ *
+ * If there are more messages in the queue, the next message is returned from
+ * this call.
+ */
+struct spi_message *spi_get_next_queued_message(struct spi_master *master)
+{
+ struct spi_message *next;
+ unsigned long flags;
+
+ /* get a pointer to the next message, if any */
+ spin_lock_irqsave(&master->queue_lock, flags);
+ next = list_first_entry_or_null(&master->queue, struct spi_message,
+ queue);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ return next;
+}
+EXPORT_SYMBOL_GPL(spi_get_next_queued_message);
+
+/**
+ * spi_finalize_current_message() - the current message is complete
+ * @master: the master to return the message to
+ *
+ * Called by the driver to notify the core that the message in the front of the
+ * queue is complete and can be removed from the queue.
+ */
+void spi_finalize_current_message(struct spi_master *master)
+{
+ struct spi_message *mesg;
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+ mesg = master->cur_msg;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ spi_unmap_msg(master, mesg);
+
+ if (master->cur_msg_prepared && master->unprepare_message) {
+ ret = master->unprepare_message(master, mesg);
+ if (ret) {
+ dev_err(&master->dev,
+ "failed to unprepare message: %d\n", ret);
+ }
+ }
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+ master->cur_msg = NULL;
+ master->cur_msg_prepared = false;
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ trace_spi_message_done(mesg);
+
+ mesg->state = NULL;
+ if (mesg->complete)
+ mesg->complete(mesg->context);
+}
+EXPORT_SYMBOL_GPL(spi_finalize_current_message);
+
+static int spi_start_queue(struct spi_master *master)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+
+ if (master->running || master->busy) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return -EBUSY;
+ }
+
+ master->running = true;
+ master->cur_msg = NULL;
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+
+ return 0;
+}
+
+static int spi_stop_queue(struct spi_master *master)
+{
+ unsigned long flags;
+ unsigned limit = 500;
+ int ret = 0;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+
+ /*
+ * This is a bit lame, but is optimized for the common execution path.
+ * A wait_queue on the master->busy could be used, but then the common
+ * execution path (pump_messages) would be required to call wake_up or
+ * friends on every SPI message. Do this instead.
+ */
+ while ((!list_empty(&master->queue) || master->busy) && limit--) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ usleep_range(10000, 11000);
+ spin_lock_irqsave(&master->queue_lock, flags);
+ }
+
+ if (!list_empty(&master->queue) || master->busy)
+ ret = -EBUSY;
+ else
+ master->running = false;
+
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+
+ if (ret) {
+ dev_warn(&master->dev,
+ "could not stop message queue\n");
+ return ret;
+ }
+ return ret;
+}
+
+static int spi_destroy_queue(struct spi_master *master)
+{
+ int ret;
+
+ ret = spi_stop_queue(master);
+
+ /*
+ * flush_kthread_worker will block until all work is done.
+ * If the reason that stop_queue timed out is that the work will never
+ * finish, then it does no good to call flush/stop thread, so
+ * return anyway.
+ */
+ if (ret) {
+ dev_err(&master->dev, "problem destroying queue\n");
+ return ret;
+ }
+
+ flush_kthread_worker(&master->kworker);
+ kthread_stop(master->kworker_task);
+
+ return 0;
+}
+
+static int __spi_queued_transfer(struct spi_device *spi,
+ struct spi_message *msg,
+ bool need_pump)
+{
+ struct spi_master *master = spi->master;
+ unsigned long flags;
+
+ spin_lock_irqsave(&master->queue_lock, flags);
+
+ if (!master->running) {
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return -ESHUTDOWN;
+ }
+ msg->actual_length = 0;
+ msg->status = -EINPROGRESS;
+
+ list_add_tail(&msg->queue, &master->queue);
+ if (!master->busy && need_pump)
+ queue_kthread_work(&master->kworker, &master->pump_messages);
+
+ spin_unlock_irqrestore(&master->queue_lock, flags);
+ return 0;
+}
+
+/**
+ * spi_queued_transfer - transfer function for queued transfers
+ * @spi: spi device which is requesting transfer
+ * @msg: spi message to be handled, queued onto the driver queue
+ */
+static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
+{
+ return __spi_queued_transfer(spi, msg, true);
+}
+
+static int spi_master_initialize_queue(struct spi_master *master)
+{
+ int ret;
+
+ master->transfer = spi_queued_transfer;
+ if (!master->transfer_one_message)
+ master->transfer_one_message = spi_transfer_one_message;
+
+ /* Initialize and start queue */
+ ret = spi_init_queue(master);
+ if (ret) {
+ dev_err(&master->dev, "problem initializing queue\n");
+ goto err_init_queue;
+ }
+ master->queued = true;
+ ret = spi_start_queue(master);
+ if (ret) {
+ dev_err(&master->dev, "problem starting queue\n");
+ goto err_start_queue;
+ }
+
+ return 0;
+
+err_start_queue:
+ spi_destroy_queue(master);
+err_init_queue:
+ return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_OF)
+static struct spi_device *
+of_register_spi_device(struct spi_master *master, struct device_node *nc)
+{
+ struct spi_device *spi;
+ int rc;
+ u32 value;
+
+ /* Alloc an spi_device */
+ spi = spi_alloc_device(master);
+ if (!spi) {
+ dev_err(&master->dev, "spi_device alloc error for %s\n",
+ nc->full_name);
+ rc = -ENOMEM;
+ goto err_out;
+ }
+
+ /* Select device driver */
+ rc = of_modalias_node(nc, spi->modalias,
+ sizeof(spi->modalias));
+ if (rc < 0) {
+ dev_err(&master->dev, "cannot find modalias for %s\n",
+ nc->full_name);
+ goto err_out;
+ }
+
+ /* Device address */
+ rc = of_property_read_u32(nc, "reg", &value);
+ if (rc) {
+ dev_err(&master->dev, "%s has no valid 'reg' property (%d)\n",
+ nc->full_name, rc);
+ goto err_out;
+ }
+ spi->chip_select = value;
+
+ /* Mode (clock phase/polarity/etc.) */
+ if (of_find_property(nc, "spi-cpha", NULL))
+ spi->mode |= SPI_CPHA;
+ if (of_find_property(nc, "spi-cpol", NULL))
+ spi->mode |= SPI_CPOL;
+ if (of_find_property(nc, "spi-cs-high", NULL))
+ spi->mode |= SPI_CS_HIGH;
+ if (of_find_property(nc, "spi-3wire", NULL))
+ spi->mode |= SPI_3WIRE;
+ if (of_find_property(nc, "spi-lsb-first", NULL))
+ spi->mode |= SPI_LSB_FIRST;
+
+ /* Device DUAL/QUAD mode */
+ if (!of_property_read_u32(nc, "spi-tx-bus-width", &value)) {
+ switch (value) {
+ case 1:
+ break;
+ case 2:
+ spi->mode |= SPI_TX_DUAL;
+ break;
+ case 4:
+ spi->mode |= SPI_TX_QUAD;
+ break;
+ default:
+ dev_warn(&master->dev,
+ "spi-tx-bus-width %d not supported\n",
+ value);
+ break;
+ }
+ }
+
+ if (!of_property_read_u32(nc, "spi-rx-bus-width", &value)) {
+ switch (value) {
+ case 1:
+ break;
+ case 2:
+ spi->mode |= SPI_RX_DUAL;
+ break;
+ case 4:
+ spi->mode |= SPI_RX_QUAD;
+ break;
+ default:
+ dev_warn(&master->dev,
+ "spi-rx-bus-width %d not supported\n",
+ value);
+ break;
+ }
+ }
+
+ /* Device speed */
+ rc = of_property_read_u32(nc, "spi-max-frequency", &value);
+ if (rc) {
+ dev_err(&master->dev, "%s has no valid 'spi-max-frequency' property (%d)\n",
+ nc->full_name, rc);
+ goto err_out;
+ }
+ spi->max_speed_hz = value;
+
+ /* IRQ */
+ spi->irq = irq_of_parse_and_map(nc, 0);
+
+ /* Store a pointer to the node in the device structure */
+ of_node_get(nc);
+ spi->dev.of_node = nc;
+
+ /* Register the new device */
+ rc = spi_add_device(spi);
+ if (rc) {
+ dev_err(&master->dev, "spi_device register error %s\n",
+ nc->full_name);
+ goto err_out;
+ }
+
+ return spi;
+
+err_out:
+ spi_dev_put(spi);
+ return ERR_PTR(rc);
+}
+
+/**
+ * of_register_spi_devices() - Register child devices onto the SPI bus
+ * @master: Pointer to spi_master device
+ *
+ * Registers an spi_device for each child node of the master node which
+ * has a 'reg' property.
+ */
+static void of_register_spi_devices(struct spi_master *master)
+{
+ struct spi_device *spi;
+ struct device_node *nc;
+
+ if (!master->dev.of_node)
+ return;
+
+ for_each_available_child_of_node(master->dev.of_node, nc) {
+ spi = of_register_spi_device(master, nc);
+ if (IS_ERR(spi))
+ dev_warn(&master->dev, "Failed to create SPI device for %s\n",
+ nc->full_name);
+ }
+}
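+
+/*
+ * Device tree sketch (illustrative; "vendor,foo" is a made-up
+ * compatible) of a child node parsed by the code above:
+ *
+ *	spi@f8000000 {
+ *		foo@1 {
+ *			compatible = "vendor,foo";
+ *			reg = <1>;
+ *			spi-max-frequency = <20000000>;
+ *			spi-cpha;
+ *		};
+ *	};
+ */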
+#else
+static void of_register_spi_devices(struct spi_master *master) { }
+#endif
+
+#ifdef CONFIG_ACPI
+static int acpi_spi_add_resource(struct acpi_resource *ares, void *data)
+{
+ struct spi_device *spi = data;
+
+ if (ares->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) {
+ struct acpi_resource_spi_serialbus *sb;
+
+ sb = &ares->data.spi_serial_bus;
+ if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_SPI) {
+ spi->chip_select = sb->device_selection;
+ spi->max_speed_hz = sb->connection_speed;
+
+ if (sb->clock_phase == ACPI_SPI_SECOND_PHASE)
+ spi->mode |= SPI_CPHA;
+ if (sb->clock_polarity == ACPI_SPI_START_HIGH)
+ spi->mode |= SPI_CPOL;
+ if (sb->device_polarity == ACPI_SPI_ACTIVE_HIGH)
+ spi->mode |= SPI_CS_HIGH;
+ }
+ } else if (spi->irq < 0) {
+ struct resource r;
+
+ if (acpi_dev_resource_interrupt(ares, 0, &r))
+ spi->irq = r.start;
+ }
+
+ /* Always tell the ACPI core to skip this resource */
+ return 1;
+}
+
+static acpi_status acpi_spi_add_device(acpi_handle handle, u32 level,
+ void *data, void **return_value)
+{
+ struct spi_master *master = data;
+ struct list_head resource_list;
+ struct acpi_device *adev;
+ struct spi_device *spi;
+ int ret;
+
+ if (acpi_bus_get_device(handle, &adev))
+ return AE_OK;
+ if (acpi_bus_get_status(adev) || !adev->status.present)
+ return AE_OK;
+
+ spi = spi_alloc_device(master);
+ if (!spi) {
+ dev_err(&master->dev, "failed to allocate SPI device for %s\n",
+ dev_name(&adev->dev));
+ return AE_NO_MEMORY;
+ }
+
+ ACPI_COMPANION_SET(&spi->dev, adev);
+ spi->irq = -1;
+
+ INIT_LIST_HEAD(&resource_list);
+ ret = acpi_dev_get_resources(adev, &resource_list,
+ acpi_spi_add_resource, spi);
+ acpi_dev_free_resource_list(&resource_list);
+
+ if (ret < 0 || !spi->max_speed_hz) {
+ spi_dev_put(spi);
+ return AE_OK;
+ }
+
+ adev->power.flags.ignore_parent = true;
+ strlcpy(spi->modalias, acpi_device_hid(adev), sizeof(spi->modalias));
+ if (spi_add_device(spi)) {
+ adev->power.flags.ignore_parent = false;
+ dev_err(&master->dev, "failed to add SPI device %s from ACPI\n",
+ dev_name(&adev->dev));
+ spi_dev_put(spi);
+ }
+
+ return AE_OK;
+}
+
+static void acpi_register_spi_devices(struct spi_master *master)
+{
+ acpi_status status;
+ acpi_handle handle;
+
+ handle = ACPI_HANDLE(master->dev.parent);
+ if (!handle)
+ return;
+
+ status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+ acpi_spi_add_device, NULL,
+ master, NULL);
+ if (ACPI_FAILURE(status))
+ dev_warn(&master->dev, "failed to enumerate SPI slaves\n");
+}
+#else
+static inline void acpi_register_spi_devices(struct spi_master *master) {}
+#endif /* CONFIG_ACPI */
+
+static void spi_master_release(struct device *dev)
+{
+ struct spi_master *master;
+
+ master = container_of(dev, struct spi_master, dev);
+ kfree(master);
+}
+
+static struct class spi_master_class = {
+ .name = "spi_master",
+ .owner = THIS_MODULE,
+ .dev_release = spi_master_release,
+};
+
+/**
+ * spi_alloc_master - allocate SPI master controller
+ * @dev: the controller, possibly using the platform_bus
+ * @size: how much zeroed driver-private data to allocate; the pointer to this
+ * memory is in the driver_data field of the returned device,
+ * accessible with spi_master_get_devdata().
+ * Context: can sleep
+ *
+ * This call is used only by SPI master controller drivers, which are the
+ * only ones directly touching chip registers. It's how they allocate
+ * an spi_master structure, prior to calling spi_register_master().
+ *
+ * This must be called from context that can sleep. It returns the SPI
+ * master structure on success, else NULL.
+ *
+ * The caller is responsible for assigning the bus number and initializing
+ * the master's methods before calling spi_register_master(); and (after errors
+ * adding the device) calling spi_master_put() to prevent a memory leak; the
+ * final put also frees the structure, so no separate kfree() is needed.
+ */
+struct spi_master *spi_alloc_master(struct device *dev, unsigned size)
+{
+ struct spi_master *master;
+
+ if (!dev)
+ return NULL;
+
+ master = kzalloc(size + sizeof(*master), GFP_KERNEL);
+ if (!master)
+ return NULL;
+
+ device_initialize(&master->dev);
+ master->bus_num = -1;
+ master->num_chipselect = 1;
+ master->dev.class = &spi_master_class;
+ master->dev.parent = get_device(dev);
+ spi_master_set_devdata(master, &master[1]);
+
+ return master;
+}
+EXPORT_SYMBOL_GPL(spi_alloc_master);
+
+#ifdef CONFIG_OF
+static int of_spi_register_master(struct spi_master *master)
+{
+ int nb, i, *cs;
+ struct device_node *np = master->dev.of_node;
+
+ if (!np)
+ return 0;
+
+ nb = of_gpio_named_count(np, "cs-gpios");
+ master->num_chipselect = max_t(int, nb, master->num_chipselect);
+
+ /* Return error only for an incorrectly formed cs-gpios property */
+ if (nb == 0 || nb == -ENOENT)
+ return 0;
+ else if (nb < 0)
+ return nb;
+
+ cs = devm_kzalloc(&master->dev,
+ sizeof(int) * master->num_chipselect,
+ GFP_KERNEL);
+ master->cs_gpios = cs;
+
+ if (!master->cs_gpios)
+ return -ENOMEM;
+
+ for (i = 0; i < master->num_chipselect; i++)
+ cs[i] = -ENOENT;
+
+ for (i = 0; i < nb; i++)
+ cs[i] = of_get_named_gpio(np, "cs-gpios", i);
+
+ return 0;
+}
+#else
+static int of_spi_register_master(struct spi_master *master)
+{
+ return 0;
+}
+#endif
+
+/**
+ * spi_register_master - register SPI master controller
+ * @master: initialized master, originally from spi_alloc_master()
+ * Context: can sleep
+ *
+ * SPI master controllers connect to their drivers using some non-SPI bus,
+ * such as the platform bus. The final stage of probe() in that code
+ * includes calling spi_register_master() to hook up to this SPI bus glue.
+ *
+ * SPI controllers use board specific (often SOC specific) bus numbers,
+ * and board-specific addressing for SPI devices combines those numbers
+ * with chip select numbers. Since SPI does not directly support dynamic
+ * device identification, boards need configuration tables telling which
+ * chip is at which address.
+ *
+ * This must be called from context that can sleep. It returns zero on
+ * success, else a negative error code (dropping the master's refcount).
+ * After a successful return, the caller is responsible for calling
+ * spi_unregister_master().
+ */
+int spi_register_master(struct spi_master *master)
+{
+ static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
+ struct device *dev = master->dev.parent;
+ struct boardinfo *bi;
+ int status = -ENODEV;
+ int dynamic = 0;
+
+ if (!dev)
+ return -ENODEV;
+
+ status = of_spi_register_master(master);
+ if (status)
+ return status;
+
+ /* even if it's just one always-selected device, there must
+ * be at least one chipselect
+ */
+ if (master->num_chipselect == 0)
+ return -EINVAL;
+
+ if ((master->bus_num < 0) && master->dev.of_node)
+ master->bus_num = of_alias_get_id(master->dev.of_node, "spi");
+
+ /* convention: dynamically assigned bus IDs count down from the max */
+ if (master->bus_num < 0) {
+ /* FIXME switch to an IDR based scheme, something like
+ * I2C now uses, so we can't run out of "dynamic" IDs
+ */
+ master->bus_num = atomic_dec_return(&dyn_bus_id);
+ dynamic = 1;
+ }
+
+ INIT_LIST_HEAD(&master->queue);
+ spin_lock_init(&master->queue_lock);
+ spin_lock_init(&master->bus_lock_spinlock);
+ mutex_init(&master->bus_lock_mutex);
+ master->bus_lock_flag = 0;
+ init_completion(&master->xfer_completion);
+ if (!master->max_dma_len)
+ master->max_dma_len = INT_MAX;
+
+ /* register the device, then userspace will see it.
+ * registration fails if the bus ID is in use.
+ */
+ dev_set_name(&master->dev, "spi%u", master->bus_num);
+ status = device_add(&master->dev);
+ if (status < 0)
+ goto done;
+ dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
+ dynamic ? " (dynamic)" : "");
+
+ /* If we're using a queued driver, start the queue */
+ if (master->transfer)
+ dev_info(dev, "master is unqueued, this is deprecated\n");
+ else {
+ status = spi_master_initialize_queue(master);
+ if (status) {
+ device_del(&master->dev);
+ goto done;
+ }
+ }
+
+ mutex_lock(&board_lock);
+ list_add_tail(&master->list, &spi_master_list);
+ list_for_each_entry(bi, &board_list, list)
+ spi_match_master_to_boardinfo(master, &bi->board_info);
+ mutex_unlock(&board_lock);
+
+ /* Register devices from the device tree and ACPI */
+ of_register_spi_devices(master);
+ acpi_register_spi_devices(master);
+done:
+ return status;
+}
+EXPORT_SYMBOL_GPL(spi_register_master);
+
+static void devm_spi_unregister(struct device *dev, void *res)
+{
+ spi_unregister_master(*(struct spi_master **)res);
+}
+
+/**
+ * devm_spi_register_master - register managed SPI master controller
+ * @dev: device managing SPI master
+ * @master: initialized master, originally from spi_alloc_master()
+ * Context: can sleep
+ *
+ * Register an SPI master as with spi_register_master(), except that the
+ * master is automatically unregistered when @dev goes away.
+ */
+int devm_spi_register_master(struct device *dev, struct spi_master *master)
+{
+ struct spi_master **ptr;
+ int ret;
+
+ ptr = devres_alloc(devm_spi_unregister, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return -ENOMEM;
+
+ ret = spi_register_master(master);
+ if (!ret) {
+ *ptr = master;
+ devres_add(dev, ptr);
+ } else {
+ devres_free(ptr);
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(devm_spi_register_master);
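+
+/*
+ * Usage sketch (illustrative; "foo_priv" is hypothetical): controller
+ * probe() code can use the managed variant so that no explicit
+ * unregister call is needed in its remove() path:
+ *
+ *	master = spi_alloc_master(&pdev->dev, sizeof(struct foo_priv));
+ *	if (!master)
+ *		return -ENOMEM;
+ *	(... initialize master fields ...)
+ *	return devm_spi_register_master(&pdev->dev, master);
+ */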
+
+static int __unregister(struct device *dev, void *null)
+{
+ spi_unregister_device(to_spi_device(dev));
+ return 0;
+}
+
+/**
+ * spi_unregister_master - unregister SPI master controller
+ * @master: the master being unregistered
+ * Context: can sleep
+ *
+ * This call is used only by SPI master controller drivers, which are the
+ * only ones directly touching chip registers.
+ *
+ * This must be called from context that can sleep.
+ */
+void spi_unregister_master(struct spi_master *master)
+{
+ int dummy;
+
+ if (master->queued) {
+ if (spi_destroy_queue(master))
+ dev_err(&master->dev, "queue remove failed\n");
+ }
+
+ mutex_lock(&board_lock);
+ list_del(&master->list);
+ mutex_unlock(&board_lock);
+
+ dummy = device_for_each_child(&master->dev, NULL, __unregister);
+ device_unregister(&master->dev);
+}
+EXPORT_SYMBOL_GPL(spi_unregister_master);
+
+int spi_master_suspend(struct spi_master *master)
+{
+ int ret;
+
+ /* Basically no-ops for non-queued masters */
+ if (!master->queued)
+ return 0;
+
+ ret = spi_stop_queue(master);
+ if (ret)
+ dev_err(&master->dev, "queue stop failed\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_suspend);
+
+int spi_master_resume(struct spi_master *master)
+{
+ int ret;
+
+ if (!master->queued)
+ return 0;
+
+ ret = spi_start_queue(master);
+ if (ret)
+ dev_err(&master->dev, "queue restart failed\n");
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_master_resume);
+
+static int __spi_master_match(struct device *dev, const void *data)
+{
+ struct spi_master *m;
+ const u16 *bus_num = data;
+
+ m = container_of(dev, struct spi_master, dev);
+ return m->bus_num == *bus_num;
+}
+
+/**
+ * spi_busnum_to_master - look up master associated with bus_num
+ * @bus_num: the master's bus number
+ * Context: can sleep
+ *
+ * This call may be used with devices that are registered after
+ * arch init time. It returns a refcounted pointer to the relevant
+ * spi_master (which the caller must release), or NULL if there is
+ * no such master registered.
+ */
+struct spi_master *spi_busnum_to_master(u16 bus_num)
+{
+ struct device *dev;
+ struct spi_master *master = NULL;
+
+ dev = class_find_device(&spi_master_class, NULL, &bus_num,
+ __spi_master_match);
+ if (dev)
+ master = container_of(dev, struct spi_master, dev);
+ /* reference taken in class_find_device */
+ return master;
+}
+EXPORT_SYMBOL_GPL(spi_busnum_to_master);
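+
+/*
+ * Usage sketch (illustrative): the caller owns the reference returned
+ * above and must drop it when done:
+ *
+ *	struct spi_master *master = spi_busnum_to_master(0);
+ *
+ *	if (master) {
+ *		(... use the master ...)
+ *		spi_master_put(master);
+ *	}
+ */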
+
+/*-------------------------------------------------------------------------*/
+
+/* Core methods for SPI master protocol drivers. Some of the
+ * other core methods are currently defined as inline functions.
+ */
+
+/**
+ * spi_setup - setup SPI mode and clock rate
+ * @spi: the device whose settings are being modified
+ * Context: can sleep, and no requests are queued to the device
+ *
+ * SPI protocol drivers may need to update the transfer mode if the
+ * device doesn't work with its default. They may likewise need
+ * to update clock rates or word sizes from initial values. This function
+ * changes those settings, and must be called from a context that can sleep.
+ * Except for SPI_CS_HIGH, which takes effect immediately, the changes take
+ * effect the next time the device is selected and data is transferred to
+ * or from it. When this function returns, the spi device is deselected.
+ *
+ * Note that this call will fail if the protocol driver specifies an option
+ * that the underlying controller or its driver does not support. For
+ * example, not all hardware supports wire transfers using nine bit words,
+ * LSB-first wire encoding, or active-high chipselects.
+ */
+int spi_setup(struct spi_device *spi)
+{
+ unsigned bad_bits, ugly_bits;
+ int status = 0;
+
+ /* check mode to prevent DUAL and QUAD from being set at the
+ * same time
+ */
+ if (((spi->mode & SPI_TX_DUAL) && (spi->mode & SPI_TX_QUAD)) ||
+ ((spi->mode & SPI_RX_DUAL) && (spi->mode & SPI_RX_QUAD))) {
+ dev_err(&spi->dev,
+ "setup: can not select dual and quad at the same time\n");
+ return -EINVAL;
+ }
+ /* in SPI_3WIRE mode, DUAL and QUAD are forbidden
+ */
+ if ((spi->mode & SPI_3WIRE) && (spi->mode &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)))
+ return -EINVAL;
+ /* help drivers fail *cleanly* when they need options
+ * that aren't supported with their current master
+ */
+ bad_bits = spi->mode & ~spi->master->mode_bits;
+ ugly_bits = bad_bits &
+ (SPI_TX_DUAL | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD);
+ if (ugly_bits) {
+ dev_warn(&spi->dev,
+ "setup: ignoring unsupported mode bits %x\n",
+ ugly_bits);
+ spi->mode &= ~ugly_bits;
+ bad_bits &= ~ugly_bits;
+ }
+ if (bad_bits) {
+ dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
+ bad_bits);
+ return -EINVAL;
+ }
+
+ if (!spi->bits_per_word)
+ spi->bits_per_word = 8;
+
+ if (!spi->max_speed_hz)
+ spi->max_speed_hz = spi->master->max_speed_hz;
+
+ spi_set_cs(spi, false);
+
+ if (spi->master->setup)
+ status = spi->master->setup(spi);
+
+ dev_dbg(&spi->dev, "setup mode %d, %s%s%s%s%u bits/w, %u Hz max --> %d\n",
+ (int) (spi->mode & (SPI_CPOL | SPI_CPHA)),
+ (spi->mode & SPI_CS_HIGH) ? "cs_high, " : "",
+ (spi->mode & SPI_LSB_FIRST) ? "lsb, " : "",
+ (spi->mode & SPI_3WIRE) ? "3wire, " : "",
+ (spi->mode & SPI_LOOP) ? "loopback, " : "",
+ spi->bits_per_word, spi->max_speed_hz,
+ status);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(spi_setup);
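+
+/*
+ * Illustrative sketch, not part of this patch: a protocol driver's probe()
+ * typically overrides the defaults it cares about and then calls
+ * spi_setup(); the values here are hypothetical.
+ *
+ * spi->mode = SPI_MODE_3;
+ * spi->bits_per_word = 16;
+ * spi->max_speed_hz = 1000000;
+ * status = spi_setup(spi);
+ * if (status < 0)
+ * return status;
+ */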
+
+static int __spi_validate(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_master *master = spi->master;
+ struct spi_transfer *xfer;
+ int w_size;
+
+ if (list_empty(&message->transfers))
+ return -EINVAL;
+
+ /* Half-duplex links include original MicroWire, and ones with
+ * only one data pin like SPI_3WIRE (switches direction) or where
+ * either MOSI or MISO is missing. They can also be caused by
+ * software limitations.
+ */
+ if ((master->flags & SPI_MASTER_HALF_DUPLEX)
+ || (spi->mode & SPI_3WIRE)) {
+ unsigned flags = master->flags;
+
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ if (xfer->rx_buf && xfer->tx_buf)
+ return -EINVAL;
+ if ((flags & SPI_MASTER_NO_TX) && xfer->tx_buf)
+ return -EINVAL;
+ if ((flags & SPI_MASTER_NO_RX) && xfer->rx_buf)
+ return -EINVAL;
+ }
+ }
+
+ /*
+ * Set transfer bits_per_word and max speed to the spi device defaults
+ * if they are not set for this transfer.
+ * Set transfer tx_nbits and rx_nbits to the single-transfer default
+ * (SPI_NBITS_SINGLE) if they are not set for this transfer.
+ */
+ list_for_each_entry(xfer, &message->transfers, transfer_list) {
+ message->frame_length += xfer->len;
+ if (!xfer->bits_per_word)
+ xfer->bits_per_word = spi->bits_per_word;
+
+ if (!xfer->speed_hz)
+ xfer->speed_hz = spi->max_speed_hz;
+
+ if (master->max_speed_hz &&
+ xfer->speed_hz > master->max_speed_hz)
+ xfer->speed_hz = master->max_speed_hz;
+
+ if (master->bits_per_word_mask) {
+ /* Only 32 bits fit in the mask */
+ if (xfer->bits_per_word > 32)
+ return -EINVAL;
+ if (!(master->bits_per_word_mask &
+ BIT(xfer->bits_per_word - 1)))
+ return -EINVAL;
+ }
+
+ /*
+ * SPI transfer length should be multiple of SPI word size
+ * where SPI word size should be power-of-two multiple
+ */
+ if (xfer->bits_per_word <= 8)
+ w_size = 1;
+ else if (xfer->bits_per_word <= 16)
+ w_size = 2;
+ else
+ w_size = 4;
+
+ /* No partial transfers accepted */
+ if (xfer->len % w_size)
+ return -EINVAL;
+
+ if (xfer->speed_hz && master->min_speed_hz &&
+ xfer->speed_hz < master->min_speed_hz)
+ return -EINVAL;
+
+ if (xfer->tx_buf && !xfer->tx_nbits)
+ xfer->tx_nbits = SPI_NBITS_SINGLE;
+ if (xfer->rx_buf && !xfer->rx_nbits)
+ xfer->rx_nbits = SPI_NBITS_SINGLE;
+ /* check transfer tx/rx_nbits:
+ * 1. check the value matches one of single, dual, or quad
+ * 2. check tx/rx_nbits match the mode in spi_device
+ */
+ if (xfer->tx_buf) {
+ if (xfer->tx_nbits != SPI_NBITS_SINGLE &&
+ xfer->tx_nbits != SPI_NBITS_DUAL &&
+ xfer->tx_nbits != SPI_NBITS_QUAD)
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_DUAL) &&
+ !(spi->mode & (SPI_TX_DUAL | SPI_TX_QUAD)))
+ return -EINVAL;
+ if ((xfer->tx_nbits == SPI_NBITS_QUAD) &&
+ !(spi->mode & SPI_TX_QUAD))
+ return -EINVAL;
+ }
+ /* check transfer rx_nbits */
+ if (xfer->rx_buf) {
+ if (xfer->rx_nbits != SPI_NBITS_SINGLE &&
+ xfer->rx_nbits != SPI_NBITS_DUAL &&
+ xfer->rx_nbits != SPI_NBITS_QUAD)
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_DUAL) &&
+ !(spi->mode & (SPI_RX_DUAL | SPI_RX_QUAD)))
+ return -EINVAL;
+ if ((xfer->rx_nbits == SPI_NBITS_QUAD) &&
+ !(spi->mode & SPI_RX_QUAD))
+ return -EINVAL;
+ }
+ }
+
+ message->status = -EINPROGRESS;
+
+ return 0;
+}
+
+static int __spi_async(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_master *master = spi->master;
+
+ message->spi = spi;
+
+ trace_spi_message_submit(message);
+
+ return master->transfer(spi, message);
+}
+
+/**
+ * spi_async - asynchronous SPI transfer
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, including completion callback
+ * Context: any (irqs may be blocked, etc)
+ *
+ * This call may be used in_irq and other contexts which can't sleep,
+ * as well as from task contexts which can sleep.
+ *
+ * The completion callback is invoked in a context which can't sleep.
+ * Before that invocation, the value of message->status is undefined.
+ * When the callback is issued, message->status holds either zero (to
+ * indicate complete success) or a negative error code. After that
+ * callback returns, the driver which issued the transfer request may
+ * deallocate the associated memory; it's no longer in use by any SPI
+ * core or controller driver code.
+ *
+ * Note that although all messages to a spi_device are handled in
+ * FIFO order, messages may go to different devices in other orders.
+ * Some devices might be higher priority, or have various "hard" access
+ * time requirements, for example.
+ *
+ * On detection of any fault during the transfer, processing of
+ * the entire message is aborted, and the device is deselected.
+ * Until returning from the associated message completion callback,
+ * no other spi_message queued to that device will be processed.
+ * (This rule applies equally to all the synchronous transfer calls,
+ * which are wrappers around this core asynchronous primitive.)
+ */
+int spi_async(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_master *master = spi->master;
+ int ret;
+ unsigned long flags;
+
+ ret = __spi_validate(spi, message);
+ if (ret != 0)
+ return ret;
+
+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+ if (master->bus_lock_flag)
+ ret = -EBUSY;
+ else
+ ret = __spi_async(spi, message);
+
+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(spi_async);
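+
+/*
+ * Illustrative sketch, not part of this patch: a typical asynchronous
+ * submission that parks on a completion. The "foo" names and the guard
+ * macro are hypothetical; the buffer must stay valid (and be DMA-safe)
+ * until the completion callback has run.
+ */
+#ifdef FOO_SPI_ASYNC_EXAMPLE
+static void foo_msg_complete(void *context)
+{
+ complete(context);
+}
+
+static int foo_send(struct spi_device *spi, void *tx, unsigned len)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ struct spi_transfer t = {
+ .tx_buf = tx,
+ .len = len,
+ };
+ struct spi_message m;
+ int ret;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ m.complete = foo_msg_complete;
+ m.context = &done;
+
+ ret = spi_async(spi, &m);
+ if (ret)
+ return ret;
+
+ wait_for_completion(&done);
+ return m.status;
+}
+#endif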
+
+/**
+ * spi_async_locked - version of spi_async with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers, including completion callback
+ * Context: any (irqs may be blocked, etc)
+ *
+ * This call may be used in_irq and other contexts which can't sleep,
+ * as well as from task contexts which can sleep.
+ *
+ * The completion callback is invoked in a context which can't sleep.
+ * Before that invocation, the value of message->status is undefined.
+ * When the callback is issued, message->status holds either zero (to
+ * indicate complete success) or a negative error code. After that
+ * callback returns, the driver which issued the transfer request may
+ * deallocate the associated memory; it's no longer in use by any SPI
+ * core or controller driver code.
+ *
+ * Note that although all messages to a spi_device are handled in
+ * FIFO order, messages may go to different devices in other orders.
+ * Some devices might be higher priority, or have various "hard" access
+ * time requirements, for example.
+ *
+ * On detection of any fault during the transfer, processing of
+ * the entire message is aborted, and the device is deselected.
+ * Until returning from the associated message completion callback,
+ * no other spi_message queued to that device will be processed.
+ * (This rule applies equally to all the synchronous transfer calls,
+ * which are wrappers around this core asynchronous primitive.)
+ */
+int spi_async_locked(struct spi_device *spi, struct spi_message *message)
+{
+ struct spi_master *master = spi->master;
+ int ret;
+ unsigned long flags;
+
+ ret = __spi_validate(spi, message);
+ if (ret != 0)
+ return ret;
+
+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+ ret = __spi_async(spi, message);
+
+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+ return ret;
+
+}
+EXPORT_SYMBOL_GPL(spi_async_locked);
+
+
+/*-------------------------------------------------------------------------*/
+
+/* Utility methods for SPI master protocol drivers, layered on
+ * top of the core. Some other utility methods are defined as
+ * inline functions.
+ */
+
+static void spi_complete(void *arg)
+{
+ complete(arg);
+}
+
+static int __spi_sync(struct spi_device *spi, struct spi_message *message,
+ int bus_locked)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ int status;
+ struct spi_master *master = spi->master;
+ unsigned long flags;
+
+ status = __spi_validate(spi, message);
+ if (status != 0)
+ return status;
+
+ message->complete = spi_complete;
+ message->context = &done;
+ message->spi = spi;
+
+ if (!bus_locked)
+ mutex_lock(&master->bus_lock_mutex);
+
+ /* If we're not using the legacy transfer method then we will
+ * try to transfer in the calling context, so special-case it.
+ * This code would be less tricky if we could remove the
+ * support for driver-implemented message queues.
+ */
+ if (master->transfer == spi_queued_transfer) {
+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+
+ trace_spi_message_submit(message);
+
+ status = __spi_queued_transfer(spi, message, false);
+
+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+ } else {
+ status = spi_async_locked(spi, message);
+ }
+
+ if (!bus_locked)
+ mutex_unlock(&master->bus_lock_mutex);
+
+ if (status == 0) {
+ /* Push out the messages in the calling context if we
+ * can.
+ */
+ if (master->transfer == spi_queued_transfer)
+ __spi_pump_messages(master, false);
+
+ wait_for_completion(&done);
+ status = message->status;
+ }
+ message->context = NULL;
+ return status;
+}
+
+/**
+ * spi_sync - blocking/synchronous SPI data transfers
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout. Low-overhead controller
+ * drivers may DMA directly into and out of the message buffers.
+ *
+ * Note that the SPI device's chip select is active during the message,
+ * and then is normally disabled between messages. Drivers for some
+ * frequently-used devices may want to minimize costs of selecting a chip,
+ * by leaving it selected in anticipation that the next message will go
+ * to the same chip. (That may increase power usage.)
+ *
+ * Also, the caller is guaranteeing that the memory associated with the
+ * message will not be freed before this call returns.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_sync(struct spi_device *spi, struct spi_message *message)
+{
+ return __spi_sync(spi, message, 0);
+}
+EXPORT_SYMBOL_GPL(spi_sync);
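+
+/*
+ * Illustrative sketch, not part of this patch: a full-duplex synchronous
+ * exchange built from one spi_transfer. The "foo" name and guard macro are
+ * hypothetical; tx and rx must be DMA-safe buffers of the same length.
+ */
+#ifdef FOO_SPI_SYNC_EXAMPLE
+static int foo_exchange(struct spi_device *spi, const void *tx, void *rx,
+ unsigned len)
+{
+ struct spi_transfer t = {
+ .tx_buf = tx,
+ .rx_buf = rx,
+ .len = len,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ return spi_sync(spi, &m);
+}
+#endif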
+
+/**
+ * spi_sync_locked - version of spi_sync with exclusive bus usage
+ * @spi: device with which data will be exchanged
+ * @message: describes the data transfers
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout. Low-overhead controller
+ * drivers may DMA directly into and out of the message buffers.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus. It has to be preceded by a spi_bus_lock call. The SPI bus must
+ * be released by a spi_bus_unlock call when the exclusive access is over.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_sync_locked(struct spi_device *spi, struct spi_message *message)
+{
+ return __spi_sync(spi, message, 1);
+}
+EXPORT_SYMBOL_GPL(spi_sync_locked);
+
+/**
+ * spi_bus_lock - obtain a lock for exclusive SPI bus usage
+ * @master: SPI bus master that should be locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call should be used by drivers that require exclusive access to the
+ * SPI bus. The SPI bus must be released by a spi_bus_unlock call when the
+ * exclusive access is over. Data transfer must be done by spi_sync_locked
+ * and spi_async_locked calls when the SPI bus lock is held.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_bus_lock(struct spi_master *master)
+{
+ unsigned long flags;
+
+ mutex_lock(&master->bus_lock_mutex);
+
+ spin_lock_irqsave(&master->bus_lock_spinlock, flags);
+ master->bus_lock_flag = 1;
+ spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
+
+ /* mutex remains locked until spi_bus_unlock is called */
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_lock);
+
+/**
+ * spi_bus_unlock - release the lock for exclusive SPI bus usage
+ * @master: SPI bus master that was locked for exclusive bus access
+ * Context: can sleep
+ *
+ * This call may only be used from a context that may sleep. The sleep
+ * is non-interruptible, and has no timeout.
+ *
+ * This call releases an SPI bus lock previously obtained by an spi_bus_lock
+ * call.
+ *
+ * It returns zero on success, else a negative error code.
+ */
+int spi_bus_unlock(struct spi_master *master)
+{
+ master->bus_lock_flag = 0;
+
+ mutex_unlock(&master->bus_lock_mutex);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(spi_bus_unlock);
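+
+/*
+ * Illustrative sketch, not part of this patch: a driver that must issue
+ * several messages back-to-back with no other traffic in between brackets
+ * them with the bus lock, e.g.
+ *
+ * spi_bus_lock(spi->master);
+ * ret = spi_sync_locked(spi, &first_msg);
+ * if (!ret)
+ * ret = spi_sync_locked(spi, &second_msg);
+ * spi_bus_unlock(spi->master);
+ */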
+
+/* portable code must never pass more than 32 bytes */
+#define SPI_BUFSIZ max(32, SMP_CACHE_BYTES)
+
+static u8 *buf;
+
+/**
+ * spi_write_then_read - SPI synchronous write followed by read
+ * @spi: device with which data will be exchanged
+ * @txbuf: data to be written (need not be dma-safe)
+ * @n_tx: size of txbuf, in bytes
+ * @rxbuf: buffer into which data will be read (need not be dma-safe)
+ * @n_rx: size of rxbuf, in bytes
+ * Context: can sleep
+ *
+ * This performs a half duplex MicroWire style transaction with the
+ * device, sending txbuf and then reading rxbuf. The return value
+ * is zero for success, else a negative errno status code.
+ * This call may only be used from a context that may sleep.
+ *
+ * Parameters to this routine are always copied using a small buffer;
+ * portable code should never use this for more than 32 bytes.
+ * Performance-sensitive or bulk transfer code should instead use
+ * spi_{async,sync}() calls with dma-safe buffers.
+ */
+int spi_write_then_read(struct spi_device *spi,
+ const void *txbuf, unsigned n_tx,
+ void *rxbuf, unsigned n_rx)
+{
+ static DEFINE_MUTEX(lock);
+
+ int status;
+ struct spi_message message;
+ struct spi_transfer x[2];
+ u8 *local_buf;
+
+ /* Use preallocated DMA-safe buffer if we can. We can't avoid
+ * copying here (it's a pure convenience thing), but we can
+ * keep heap costs out of the hot path unless someone else is
+ * using the pre-allocated buffer or the transfer is too large.
+ */
+ if ((n_tx + n_rx) > SPI_BUFSIZ || !mutex_trylock(&lock)) {
+ local_buf = kmalloc(max((unsigned)SPI_BUFSIZ, n_tx + n_rx),
+ GFP_KERNEL | GFP_DMA);
+ if (!local_buf)
+ return -ENOMEM;
+ } else {
+ local_buf = buf;
+ }
+
+ spi_message_init(&message);
+ memset(x, 0, sizeof(x));
+ if (n_tx) {
+ x[0].len = n_tx;
+ spi_message_add_tail(&x[0], &message);
+ }
+ if (n_rx) {
+ x[1].len = n_rx;
+ spi_message_add_tail(&x[1], &message);
+ }
+
+ memcpy(local_buf, txbuf, n_tx);
+ x[0].tx_buf = local_buf;
+ x[1].rx_buf = local_buf + n_tx;
+
+ /* do the i/o */
+ status = spi_sync(spi, &message);
+ if (status == 0)
+ memcpy(rxbuf, x[1].rx_buf, n_rx);
+
+ if (x[0].tx_buf == buf)
+ mutex_unlock(&lock);
+ else
+ kfree(local_buf);
+
+ return status;
+}
+EXPORT_SYMBOL_GPL(spi_write_then_read);
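+
+/*
+ * Illustrative sketch, not part of this patch: reading one register from a
+ * chip whose protocol is "send a command byte, then read one byte back".
+ * The command encoding is hypothetical.
+ *
+ * u8 cmd = 0x80 | reg;
+ * u8 val;
+ * int ret = spi_write_then_read(spi, &cmd, 1, &val, 1);
+ */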
+
+/*-------------------------------------------------------------------------*/
+
+#if IS_ENABLED(CONFIG_OF_DYNAMIC)
+static int __spi_of_device_match(struct device *dev, void *data)
+{
+ return dev->of_node == data;
+}
+
+/* must call put_device() when done with returned spi_device device */
+static struct spi_device *of_find_spi_device_by_node(struct device_node *node)
+{
+ struct device *dev = bus_find_device(&spi_bus_type, NULL, node,
+ __spi_of_device_match);
+ return dev ? to_spi_device(dev) : NULL;
+}
+
+static int __spi_of_master_match(struct device *dev, const void *data)
+{
+ return dev->of_node == data;
+}
+
+/* SPI masters are not on the spi_bus, so we look them up another way */
+static struct spi_master *of_find_spi_master_by_node(struct device_node *node)
+{
+ struct device *dev;
+
+ dev = class_find_device(&spi_master_class, NULL, node,
+ __spi_of_master_match);
+ if (!dev)
+ return NULL;
+
+ /* reference obtained in class_find_device */
+ return container_of(dev, struct spi_master, dev);
+}
+
+static int of_spi_notify(struct notifier_block *nb, unsigned long action,
+ void *arg)
+{
+ struct of_reconfig_data *rd = arg;
+ struct spi_master *master;
+ struct spi_device *spi;
+
+ switch (of_reconfig_get_state_change(action, arg)) {
+ case OF_RECONFIG_CHANGE_ADD:
+ master = of_find_spi_master_by_node(rd->dn->parent);
+ if (master == NULL)
+ return NOTIFY_OK; /* not for us */
+
+ spi = of_register_spi_device(master, rd->dn);
+ put_device(&master->dev);
+
+ if (IS_ERR(spi)) {
+ pr_err("%s: failed to create for '%s'\n",
+ __func__, rd->dn->full_name);
+ return notifier_from_errno(PTR_ERR(spi));
+ }
+ break;
+
+ case OF_RECONFIG_CHANGE_REMOVE:
+ /* find our device by node */
+ spi = of_find_spi_device_by_node(rd->dn);
+ if (spi == NULL)
+ return NOTIFY_OK; /* no? not meant for us */
+
+ /* unregister takes one ref away */
+ spi_unregister_device(spi);
+
+ /* and drop the reference taken by the find */
+ put_device(&spi->dev);
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block spi_of_notifier = {
+ .notifier_call = of_spi_notify,
+};
+#else /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+extern struct notifier_block spi_of_notifier;
+#endif /* IS_ENABLED(CONFIG_OF_DYNAMIC) */
+
+static int __init spi_init(void)
+{
+ int status;
+
+ buf = kmalloc(SPI_BUFSIZ, GFP_KERNEL);
+ if (!buf) {
+ status = -ENOMEM;
+ goto err0;
+ }
+
+ status = bus_register(&spi_bus_type);
+ if (status < 0)
+ goto err1;
+
+ status = class_register(&spi_master_class);
+ if (status < 0)
+ goto err2;
+
+ if (IS_ENABLED(CONFIG_OF_DYNAMIC))
+ WARN_ON(of_reconfig_notifier_register(&spi_of_notifier));
+
+ return 0;
+
+err2:
+ bus_unregister(&spi_bus_type);
+err1:
+ kfree(buf);
+ buf = NULL;
+err0:
+ return status;
+}
+
+/* board_info is normally registered in arch_initcall(),
+ * but even essential drivers wait till later
+ *
+ * REVISIT only boardinfo really needs static linking. The rest (device and
+ * driver registration) _could_ be dynamically linked (modular) ... costs
+ * include needing to have boardinfo data structures be much more public.
+ */
+postcore_initcall(spi_init);
+
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
new file mode 100644
index 000000000..92c909eed
--- /dev/null
+++ b/drivers/spi/spidev.c
@@ -0,0 +1,856 @@
+/*
+ * Simple synchronous userspace interface to SPI devices
+ *
+ * Copyright (C) 2006 SWAPP
+ * Andrea Paterniani <a.paterniani@swapp-eng.it>
+ * Copyright (C) 2007 David Brownell (simplification, cleanup)
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/ioctl.h>
+#include <linux/fs.h>
+#include <linux/device.h>
+#include <linux/err.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/compat.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/spidev.h>
+
+#include <linux/uaccess.h>
+
+
+/*
+ * This supports access to SPI devices using normal userspace I/O calls.
+ * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
+ * and often mask message boundaries, full SPI support requires full duplex
+ * transfers. There are several kinds of internal message boundaries to
+ * handle chipselect management and other protocol options.
+ *
+ * SPI has a character major number assigned. We allocate minor numbers
+ * dynamically using a bitmask. You must use hotplug tools, such as udev
+ * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
+ * nodes, since there is no fixed association of minor numbers with any
+ * particular SPI bus or device.
+ */
+#define SPIDEV_MAJOR 153 /* assigned */
+#define N_SPI_MINORS 32 /* ... up to 256 */
+
+static DECLARE_BITMAP(minors, N_SPI_MINORS);
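+
+/*
+ * Illustrative sketch, not part of this patch: from userspace, a
+ * full-duplex transfer through a /dev/spidevB.C node looks like this
+ * (error handling omitted; the device path and values are hypothetical):
+ *
+ * int fd = open("/dev/spidev0.0", O_RDWR);
+ * struct spi_ioc_transfer tr = {
+ * .tx_buf = (unsigned long)tx,
+ * .rx_buf = (unsigned long)rx,
+ * .len = sizeof(tx),
+ * .speed_hz = 500000,
+ * .bits_per_word = 8,
+ * };
+ * ioctl(fd, SPI_IOC_MESSAGE(1), &tr);
+ */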
+
+
+/* Bit masks for spi_device.mode management. Note that incorrect
+ * values for some of these settings can cause *lots* of trouble for other
+ * devices on a shared bus:
+ *
+ * - CS_HIGH ... this device will be active when it shouldn't be
+ * - 3WIRE ... when active, it won't behave as it should
+ * - NO_CS ... there will be no explicit message boundaries; this
+ * is completely incompatible with the shared bus model
+ * - READY ... transfers may proceed when they shouldn't.
+ *
+ * REVISIT should changing those flags be privileged?
+ */
+#define SPI_MODE_MASK (SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
+ | SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
+ | SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
+ | SPI_TX_QUAD | SPI_RX_DUAL | SPI_RX_QUAD)
+
+struct spidev_data {
+ dev_t devt;
+ spinlock_t spi_lock;
+ struct spi_device *spi;
+ struct list_head device_entry;
+
+ /* TX/RX buffers are NULL unless this device is open (users > 0) */
+ struct mutex buf_lock;
+ unsigned users;
+ u8 *tx_buffer;
+ u8 *rx_buffer;
+ u32 speed_hz;
+};
+
+static LIST_HEAD(device_list);
+static DEFINE_MUTEX(device_list_lock);
+
+static unsigned bufsiz = 4096;
+module_param(bufsiz, uint, S_IRUGO);
+MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
+
+/*-------------------------------------------------------------------------*/
+
+/*
+ * We can't use the standard synchronous wrappers for file I/O; we
+ * need to protect against async removal of the underlying spi_device.
+ */
+static void spidev_complete(void *arg)
+{
+ complete(arg);
+}
+
+static ssize_t
+spidev_sync(struct spidev_data *spidev, struct spi_message *message)
+{
+ DECLARE_COMPLETION_ONSTACK(done);
+ int status;
+
+ message->complete = spidev_complete;
+ message->context = &done;
+
+ spin_lock_irq(&spidev->spi_lock);
+ if (spidev->spi == NULL)
+ status = -ESHUTDOWN;
+ else
+ status = spi_async(spidev->spi, message);
+ spin_unlock_irq(&spidev->spi_lock);
+
+ if (status == 0) {
+ wait_for_completion(&done);
+ status = message->status;
+ if (status == 0)
+ status = message->actual_length;
+ }
+ return status;
+}
+
+static inline ssize_t
+spidev_sync_write(struct spidev_data *spidev, size_t len)
+{
+ struct spi_transfer t = {
+ .tx_buf = spidev->tx_buffer,
+ .len = len,
+ .speed_hz = spidev->speed_hz,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ return spidev_sync(spidev, &m);
+}
+
+static inline ssize_t
+spidev_sync_read(struct spidev_data *spidev, size_t len)
+{
+ struct spi_transfer t = {
+ .rx_buf = spidev->rx_buffer,
+ .len = len,
+ .speed_hz = spidev->speed_hz,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+ return spidev_sync(spidev, &m);
+}
+
+/*-------------------------------------------------------------------------*/
+
+/* Read-only message with current device setup */
+static ssize_t
+spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
+{
+ struct spidev_data *spidev;
+ ssize_t status = 0;
+
+ /* chipselect only toggles at start or end of operation */
+ if (count > bufsiz)
+ return -EMSGSIZE;
+
+ spidev = filp->private_data;
+
+ mutex_lock(&spidev->buf_lock);
+ status = spidev_sync_read(spidev, count);
+ if (status > 0) {
+ unsigned long missing;
+
+ missing = copy_to_user(buf, spidev->rx_buffer, status);
+ if (missing == status)
+ status = -EFAULT;
+ else
+ status = status - missing;
+ }
+ mutex_unlock(&spidev->buf_lock);
+
+ return status;
+}
+
+/* Write-only message with current device setup */
+static ssize_t
+spidev_write(struct file *filp, const char __user *buf,
+ size_t count, loff_t *f_pos)
+{
+ struct spidev_data *spidev;
+ ssize_t status = 0;
+ unsigned long missing;
+
+ /* chipselect only toggles at start or end of operation */
+ if (count > bufsiz)
+ return -EMSGSIZE;
+
+ spidev = filp->private_data;
+
+ mutex_lock(&spidev->buf_lock);
+ missing = copy_from_user(spidev->tx_buffer, buf, count);
+ if (missing == 0)
+ status = spidev_sync_write(spidev, count);
+ else
+ status = -EFAULT;
+ mutex_unlock(&spidev->buf_lock);
+
+ return status;
+}
+
+static int spidev_message(struct spidev_data *spidev,
+ struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
+{
+ struct spi_message msg;
+ struct spi_transfer *k_xfers;
+ struct spi_transfer *k_tmp;
+ struct spi_ioc_transfer *u_tmp;
+ unsigned n, total, tx_total, rx_total;
+ u8 *tx_buf, *rx_buf;
+ int status = -EFAULT;
+
+ spi_message_init(&msg);
+ k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
+ if (k_xfers == NULL)
+ return -ENOMEM;
+
+ /* Construct spi_message, copying any tx data to bounce buffer.
+ * We walk the array of user-provided transfers, using each one
+ * to initialize a kernel version of the same transfer.
+ */
+ tx_buf = spidev->tx_buffer;
+ rx_buf = spidev->rx_buffer;
+ total = 0;
+ tx_total = 0;
+ rx_total = 0;
+ for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
+ n;
+ n--, k_tmp++, u_tmp++) {
+ k_tmp->len = u_tmp->len;
+
+ total += k_tmp->len;
+ /* Since the function returns the total length of transfers
+ * on success, restrict the total to positive int values to
+ * avoid the return value looking like an error. Also check
+ * each transfer length to avoid arithmetic overflow.
+ */
+ if (total > INT_MAX || k_tmp->len > INT_MAX) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+
+ if (u_tmp->rx_buf) {
+ /* this transfer needs space in RX bounce buffer */
+ rx_total += k_tmp->len;
+ if (rx_total > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+ k_tmp->rx_buf = rx_buf;
+ if (!access_ok(VERIFY_WRITE, (u8 __user *)
+ (uintptr_t) u_tmp->rx_buf,
+ u_tmp->len))
+ goto done;
+ rx_buf += k_tmp->len;
+ }
+ if (u_tmp->tx_buf) {
+ /* this transfer needs space in TX bounce buffer */
+ tx_total += k_tmp->len;
+ if (tx_total > bufsiz) {
+ status = -EMSGSIZE;
+ goto done;
+ }
+ k_tmp->tx_buf = tx_buf;
+ if (copy_from_user(tx_buf, (const u8 __user *)
+ (uintptr_t) u_tmp->tx_buf,
+ u_tmp->len))
+ goto done;
+ tx_buf += k_tmp->len;
+ }
+
+ k_tmp->cs_change = !!u_tmp->cs_change;
+ k_tmp->tx_nbits = u_tmp->tx_nbits;
+ k_tmp->rx_nbits = u_tmp->rx_nbits;
+ k_tmp->bits_per_word = u_tmp->bits_per_word;
+ k_tmp->delay_usecs = u_tmp->delay_usecs;
+ k_tmp->speed_hz = u_tmp->speed_hz;
+ if (!k_tmp->speed_hz)
+ k_tmp->speed_hz = spidev->speed_hz;
+#ifdef VERBOSE
+ dev_dbg(&spidev->spi->dev,
+ " xfer len %u %s%s%s%dbits %u usec %uHz\n",
+ u_tmp->len,
+ u_tmp->rx_buf ? "rx " : "",
+ u_tmp->tx_buf ? "tx " : "",
+ u_tmp->cs_change ? "cs " : "",
+ u_tmp->bits_per_word ? : spidev->spi->bits_per_word,
+ u_tmp->delay_usecs,
+ u_tmp->speed_hz ? : spidev->spi->max_speed_hz);
+#endif
+ spi_message_add_tail(k_tmp, &msg);
+ }
+
+ status = spidev_sync(spidev, &msg);
+ if (status < 0)
+ goto done;
+
+ /* copy any rx data out of bounce buffer */
+ rx_buf = spidev->rx_buffer;
+ for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
+ if (u_tmp->rx_buf) {
+ if (__copy_to_user((u8 __user *)
+ (uintptr_t) u_tmp->rx_buf, rx_buf,
+ u_tmp->len)) {
+ status = -EFAULT;
+ goto done;
+ }
+ rx_buf += u_tmp->len;
+ }
+ }
+ status = total;
+
+done:
+ kfree(k_xfers);
+ return status;
+}
+
+static struct spi_ioc_transfer *
+spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
+ unsigned *n_ioc)
+{
+ struct spi_ioc_transfer *ioc;
+ u32 tmp;
+
+ /* Check type, command number and direction */
+ if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
+ || _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
+ || _IOC_DIR(cmd) != _IOC_WRITE)
+ return ERR_PTR(-ENOTTY);
+
+ tmp = _IOC_SIZE(cmd);
+ if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
+ return ERR_PTR(-EINVAL);
+ *n_ioc = tmp / sizeof(struct spi_ioc_transfer);
+ if (*n_ioc == 0)
+ return NULL;
+
+ /* copy into scratch area */
+ ioc = kmalloc(tmp, GFP_KERNEL);
+ if (!ioc)
+ return ERR_PTR(-ENOMEM);
+ if (__copy_from_user(ioc, u_ioc, tmp)) {
+ kfree(ioc);
+ return ERR_PTR(-EFAULT);
+ }
+ return ioc;
+}
+
+static long
+spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ int err = 0;
+ int retval = 0;
+ struct spidev_data *spidev;
+ struct spi_device *spi;
+ u32 tmp;
+ unsigned n_ioc;
+ struct spi_ioc_transfer *ioc;
+
+ /* Check type and command number */
+ if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
+ return -ENOTTY;
+
+ /* Check access direction once here; don't repeat below.
+ * IOC_DIR is from the user perspective, while access_ok is
+ * from the kernel perspective; so they look reversed.
+ */
+ if (_IOC_DIR(cmd) & _IOC_READ)
+ err = !access_ok(VERIFY_WRITE,
+ (void __user *)arg, _IOC_SIZE(cmd));
+ if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE)
+ err = !access_ok(VERIFY_READ,
+ (void __user *)arg, _IOC_SIZE(cmd));
+ if (err)
+ return -EFAULT;
+
+ /* guard against device removal before, or while,
+ * we issue this ioctl.
+ */
+ spidev = filp->private_data;
+ spin_lock_irq(&spidev->spi_lock);
+ spi = spi_dev_get(spidev->spi);
+ spin_unlock_irq(&spidev->spi_lock);
+
+ if (spi == NULL)
+ return -ESHUTDOWN;
+
+ /* use the buffer lock here for triple duty:
+ * - prevent I/O (from us) so calling spi_setup() is safe;
+ * - prevent concurrent SPI_IOC_WR_* from morphing
+ * data fields while SPI_IOC_RD_* reads them;
+ * - SPI_IOC_MESSAGE needs the buffer locked "normally".
+ */
+ mutex_lock(&spidev->buf_lock);
+
+ switch (cmd) {
+ /* read requests */
+ case SPI_IOC_RD_MODE:
+ retval = __put_user(spi->mode & SPI_MODE_MASK,
+ (__u8 __user *)arg);
+ break;
+ case SPI_IOC_RD_MODE32:
+ retval = __put_user(spi->mode & SPI_MODE_MASK,
+ (__u32 __user *)arg);
+ break;
+ case SPI_IOC_RD_LSB_FIRST:
+ retval = __put_user((spi->mode & SPI_LSB_FIRST) ? 1 : 0,
+ (__u8 __user *)arg);
+ break;
+ case SPI_IOC_RD_BITS_PER_WORD:
+ retval = __put_user(spi->bits_per_word, (__u8 __user *)arg);
+ break;
+ case SPI_IOC_RD_MAX_SPEED_HZ:
+ retval = __put_user(spidev->speed_hz, (__u32 __user *)arg);
+ break;
+
+ /* write requests */
+ case SPI_IOC_WR_MODE:
+ case SPI_IOC_WR_MODE32:
+ if (cmd == SPI_IOC_WR_MODE)
+ retval = __get_user(tmp, (u8 __user *)arg);
+ else
+ retval = __get_user(tmp, (u32 __user *)arg);
+ if (retval == 0) {
+ u32 save = spi->mode;
+
+ if (tmp & ~SPI_MODE_MASK) {
+ retval = -EINVAL;
+ break;
+ }
+
+ tmp |= spi->mode & ~SPI_MODE_MASK;
+ spi->mode = (u16)tmp;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->mode = save;
+ else
+ dev_dbg(&spi->dev, "spi mode %x\n", tmp);
+ }
+ break;
+ case SPI_IOC_WR_LSB_FIRST:
+ retval = __get_user(tmp, (__u8 __user *)arg);
+ if (retval == 0) {
+ u32 save = spi->mode;
+
+ if (tmp)
+ spi->mode |= SPI_LSB_FIRST;
+ else
+ spi->mode &= ~SPI_LSB_FIRST;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->mode = save;
+ else
+ dev_dbg(&spi->dev, "%csb first\n",
+ tmp ? 'l' : 'm');
+ }
+ break;
+ case SPI_IOC_WR_BITS_PER_WORD:
+ retval = __get_user(tmp, (__u8 __user *)arg);
+ if (retval == 0) {
+ u8 save = spi->bits_per_word;
+
+ spi->bits_per_word = tmp;
+ retval = spi_setup(spi);
+ if (retval < 0)
+ spi->bits_per_word = save;
+ else
+ dev_dbg(&spi->dev, "%d bits per word\n", tmp);
+ }
+ break;
+ case SPI_IOC_WR_MAX_SPEED_HZ:
+ retval = __get_user(tmp, (__u32 __user *)arg);
+ if (retval == 0) {
+ u32 save = spi->max_speed_hz;
+
+ spi->max_speed_hz = tmp;
+ retval = spi_setup(spi);
+ if (retval >= 0) {
+ spidev->speed_hz = tmp;
+ dev_dbg(&spi->dev, "%d Hz (max)\n", tmp);
+ }
+ spi->max_speed_hz = save;
+ }
+ break;
+
+ default:
+ /* segmented and/or full-duplex I/O request */
+ /* Check message and copy into scratch area */
+ ioc = spidev_get_ioc_message(cmd,
+ (struct spi_ioc_transfer __user *)arg, &n_ioc);
+ if (IS_ERR(ioc)) {
+ retval = PTR_ERR(ioc);
+ break;
+ }
+ if (!ioc)
+ break; /* n_ioc is also 0 */
+
+ /* translate to spi_message, execute */
+ retval = spidev_message(spidev, ioc, n_ioc);
+ kfree(ioc);
+ break;
+ }
+
+ mutex_unlock(&spidev->buf_lock);
+ spi_dev_put(spi);
+ return retval;
+}
+
+#ifdef CONFIG_COMPAT
+static long
+spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct spi_ioc_transfer __user *u_ioc;
+ int retval = 0;
+ struct spidev_data *spidev;
+ struct spi_device *spi;
+ unsigned n_ioc, n;
+ struct spi_ioc_transfer *ioc;
+
+ u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);
+ if (!access_ok(VERIFY_READ, u_ioc, _IOC_SIZE(cmd)))
+ return -EFAULT;
+
+ /* guard against device removal before, or while,
+ * we issue this ioctl.
+ */
+ spidev = filp->private_data;
+ spin_lock_irq(&spidev->spi_lock);
+ spi = spi_dev_get(spidev->spi);
+ spin_unlock_irq(&spidev->spi_lock);
+
+ if (spi == NULL)
+ return -ESHUTDOWN;
+
+ /* SPI_IOC_MESSAGE needs the buffer locked "normally" */
+ mutex_lock(&spidev->buf_lock);
+
+ /* Check message and copy into scratch area */
+ ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
+ if (IS_ERR(ioc)) {
+ retval = PTR_ERR(ioc);
+ goto done;
+ }
+ if (!ioc)
+ goto done; /* n_ioc is also 0 */
+
+ /* Convert buffer pointers */
+ for (n = 0; n < n_ioc; n++) {
+ ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
+ ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
+ }
+
+ /* translate to spi_message, execute */
+ retval = spidev_message(spidev, ioc, n_ioc);
+ kfree(ioc);
+
+done:
+ mutex_unlock(&spidev->buf_lock);
+ spi_dev_put(spi);
+ return retval;
+}
+
+static long
+spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+{
+ if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
+ && _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
+ && _IOC_DIR(cmd) == _IOC_WRITE)
+ return spidev_compat_ioc_message(filp, cmd, arg);
+
+ return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define spidev_compat_ioctl NULL
+#endif /* CONFIG_COMPAT */
+
+static int spidev_open(struct inode *inode, struct file *filp)
+{
+ struct spidev_data *spidev;
+ int status = -ENXIO;
+
+ mutex_lock(&device_list_lock);
+
+ list_for_each_entry(spidev, &device_list, device_entry) {
+ if (spidev->devt == inode->i_rdev) {
+ status = 0;
+ break;
+ }
+ }
+
+ if (status) {
+ pr_debug("spidev: nothing for minor %d\n", iminor(inode));
+ goto err_find_dev;
+ }
+
+ if (!spidev->tx_buffer) {
+ spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->tx_buffer) {
+ dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ status = -ENOMEM;
+ goto err_find_dev;
+ }
+ }
+
+ if (!spidev->rx_buffer) {
+ spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
+ if (!spidev->rx_buffer) {
+ dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
+ status = -ENOMEM;
+ goto err_alloc_rx_buf;
+ }
+ }
+
+ spidev->users++;
+ filp->private_data = spidev;
+ nonseekable_open(inode, filp);
+
+ mutex_unlock(&device_list_lock);
+ return 0;
+
+err_alloc_rx_buf:
+ kfree(spidev->tx_buffer);
+ spidev->tx_buffer = NULL;
+err_find_dev:
+ mutex_unlock(&device_list_lock);
+ return status;
+}
+
+static int spidev_release(struct inode *inode, struct file *filp)
+{
+ struct spidev_data *spidev;
+ int status = 0;
+
+ mutex_lock(&device_list_lock);
+ spidev = filp->private_data;
+ filp->private_data = NULL;
+
+ /* last close? */
+ spidev->users--;
+ if (!spidev->users) {
+ int dofree;
+
+ kfree(spidev->tx_buffer);
+ spidev->tx_buffer = NULL;
+
+ kfree(spidev->rx_buffer);
+ spidev->rx_buffer = NULL;
+
+ spidev->speed_hz = spidev->spi->max_speed_hz;
+
+ /* ... after we unbound from the underlying device? */
+ spin_lock_irq(&spidev->spi_lock);
+ dofree = (spidev->spi == NULL);
+ spin_unlock_irq(&spidev->spi_lock);
+
+ if (dofree)
+ kfree(spidev);
+ }
+ mutex_unlock(&device_list_lock);
+
+ return status;
+}
+
+static const struct file_operations spidev_fops = {
+ .owner = THIS_MODULE,
+ /* REVISIT switch to aio primitives, so that userspace
+ * gets more complete API coverage. It'll simplify things
+ * too, except for the locking.
+ */
+ .write = spidev_write,
+ .read = spidev_read,
+ .unlocked_ioctl = spidev_ioctl,
+ .compat_ioctl = spidev_compat_ioctl,
+ .open = spidev_open,
+ .release = spidev_release,
+ .llseek = no_llseek,
+};
+
+/*-------------------------------------------------------------------------*/
+
+/* The main reason to have this class is to make mdev/udev create the
+ * /dev/spidevB.C character device nodes exposing our userspace API.
+ * It also simplifies memory management.
+ */
+
+static struct class *spidev_class;
+
+#ifdef CONFIG_OF
+static const struct of_device_id spidev_dt_ids[] = {
+ { .compatible = "rohm,dh2228fv" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, spidev_dt_ids);
+#endif
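+
+/*
+ * Illustrative sketch, not part of this patch: a board's device tree binds
+ * this driver through a real compatible string (never "spidev" itself),
+ * e.g.
+ *
+ * device@0 {
+ * compatible = "rohm,dh2228fv";
+ * reg = <0>;
+ * spi-max-frequency = <500000>;
+ * };
+ */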
+
+/*-------------------------------------------------------------------------*/
+
+static int spidev_probe(struct spi_device *spi)
+{
+ struct spidev_data *spidev;
+ int status;
+ unsigned long minor;
+
+ /*
+ * spidev should never be referenced in DT without a specific
+ * compatible string; it is a Linux implementation thing
+ * rather than a description of the hardware.
+ */
+ if (spi->dev.of_node && !of_match_device(spidev_dt_ids, &spi->dev)) {
+ dev_err(&spi->dev, "buggy DT: spidev listed directly in DT\n");
+ WARN_ON(spi->dev.of_node &&
+ !of_match_device(spidev_dt_ids, &spi->dev));
+ }
+
+ /* Allocate driver data */
+ spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
+ if (!spidev)
+ return -ENOMEM;
+
+ /* Initialize the driver data */
+ spidev->spi = spi;
+ spin_lock_init(&spidev->spi_lock);
+ mutex_init(&spidev->buf_lock);
+
+ INIT_LIST_HEAD(&spidev->device_entry);
+
+ /* If we can allocate a minor number, hook up this device.
+ * Reusing minors is fine so long as udev or mdev is working.
+ */
+ mutex_lock(&device_list_lock);
+ minor = find_first_zero_bit(minors, N_SPI_MINORS);
+ if (minor < N_SPI_MINORS) {
+ struct device *dev;
+
+ spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
+ dev = device_create(spidev_class, &spi->dev, spidev->devt,
+ spidev, "spidev%d.%d",
+ spi->master->bus_num, spi->chip_select);
+ status = PTR_ERR_OR_ZERO(dev);
+ } else {
+ dev_dbg(&spi->dev, "no minor number available!\n");
+ status = -ENODEV;
+ }
+ if (status == 0) {
+ set_bit(minor, minors);
+ list_add(&spidev->device_entry, &device_list);
+ }
+ mutex_unlock(&device_list_lock);
+
+ spidev->speed_hz = spi->max_speed_hz;
+
+ if (status == 0)
+ spi_set_drvdata(spi, spidev);
+ else
+ kfree(spidev);
+
+ return status;
+}
+
+static int spidev_remove(struct spi_device *spi)
+{
+ struct spidev_data *spidev = spi_get_drvdata(spi);
+
+ /* make sure ops on existing fds can abort cleanly */
+ spin_lock_irq(&spidev->spi_lock);
+ spidev->spi = NULL;
+ spin_unlock_irq(&spidev->spi_lock);
+
+ /* prevent new opens */
+ mutex_lock(&device_list_lock);
+ list_del(&spidev->device_entry);
+ device_destroy(spidev_class, spidev->devt);
+ clear_bit(MINOR(spidev->devt), minors);
+ if (spidev->users == 0)
+ kfree(spidev);
+ mutex_unlock(&device_list_lock);
+
+ return 0;
+}
+
+static struct spi_driver spidev_spi_driver = {
+ .driver = {
+ .name = "spidev",
+ .owner = THIS_MODULE,
+ .of_match_table = of_match_ptr(spidev_dt_ids),
+ },
+ .probe = spidev_probe,
+ .remove = spidev_remove,
+
+ /* NOTE: suspend/resume methods are not necessary here.
+ * We don't do anything except pass the requests to/from
+ * the underlying controller. The refrigerator handles
+ * most issues; the controller driver handles the rest.
+ */
+};
+
+/*-------------------------------------------------------------------------*/
+
+static int __init spidev_init(void)
+{
+ int status;
+
+ /* Claim our 256 reserved device numbers. Then register a class
+ * that will key udev/mdev to add/remove /dev nodes. Last, register
+ * the driver which manages those device numbers.
+ */
+ BUILD_BUG_ON(N_SPI_MINORS > 256);
+ status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
+ if (status < 0)
+ return status;
+
+ spidev_class = class_create(THIS_MODULE, "spidev");
+ if (IS_ERR(spidev_class)) {
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+ return PTR_ERR(spidev_class);
+ }
+
+ status = spi_register_driver(&spidev_spi_driver);
+ if (status < 0) {
+ class_destroy(spidev_class);
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+ }
+ return status;
+}
+module_init(spidev_init);
+
+static void __exit spidev_exit(void)
+{
+ spi_unregister_driver(&spidev_spi_driver);
+ class_destroy(spidev_class);
+ unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
+}
+module_exit(spidev_exit);
+
+MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
+MODULE_DESCRIPTION("User mode SPI device interface");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("spi:spidev");